file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueWithBroadcastTensorOp {
/// Use defaults related to the existing epilogue
using Base = DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
ElementsPerAccess
>;
//
// Stores the result z = (y = GEMM(A, B, C), broadcast)
//
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
//
// Additional tensor tile iterator - stores t = Elementwise(z)
//
using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
ElementTensor
>;
/// Define the epilogue
using Epilogue = EpilogueWithBroadcast<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputTileIterator,
TensorTileIterator,
ElementVector,
typename Base::AccumulatorFragmentIterator,
typename Base::WarpTileIterator,
typename Base::SharedLoadIterator,
OutputOp,
typename Base::Padding,
Base::kFragmentsPerIteration
>;
};
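//
// Illustrative instantiation sketch (added here for clarity; not part of the original
// header). The concrete WarpMmaTensorOp and OutputOp names are assumptions chosen only
// to show which role each template parameter plays.
//
// using EpilogueWithBroadcast_ = typename cutlass::epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp<
//     cutlass::gemm::GemmShape<128, 128, 32>,  // threadblock tile shape
//     WarpMmaTensorOp,                         // warp-level tensor-op MMA (assumed defined elsewhere)
//     1,                                       // PartitionsK
//     cutlass::half_t,                         // ElementOutput: element of Z
//     cutlass::half_t,                         // ElementTensor: element of T = Elementwise(Z)
//     float,                                   // ElementVector: element of the broadcast vector
//     OutputOp,                                // e.g. a linear-combination-with-bias/elementwise functor
//     8                                        // ElementsPerAccess
// >::Epilogue;
//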
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for VoltaTensorOps.
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultEpilogueWithBroadcastVoltaTensorOp {
/// Use defaults related to the existing epilogue
using Base = DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
ElementsPerAccess
>;
//
// Stores the result z = (y = GEMM(A, B, C), broadcast)
//
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
ElementOutput
>;
//
// Additional tensor tile iterator - stores t = Elementwise(z)
//
using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
typename Base::OutputTileThreadMap,
ElementTensor
>;
/// Define the epilogue
using Epilogue = EpilogueWithBroadcast<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputTileIterator,
TensorTileIterator,
ElementVector,
typename Base::AccumulatorFragmentIterator,
typename Base::WarpTileIterator,
typename Base::SharedLoadIterator,
OutputOp,
typename Base::Padding
>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 5,817 | C | 30.619565 | 100 | 0.673887 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine_layout_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Parameter structures for predicated tile iterators specialized for affine tensor layouts.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/fast_math.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Rank
>
struct PredicatedTileIteratorAffineLayoutRankNParams {
using Layout = layout::AffineRankN<Rank>;
using TensorCoord = typename Layout::TensorCoord;
static bool const kBigEndian = false;
//
// Data members
//
Layout layout;
/// Stride in units of bytes along M modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_m;
/// Stride in units of bytes along N modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_n;
/// Fast divmod objects divided by tensor extents
FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
/// Fast divmod objects divided by tensor extents
FastDivmod divmod_n[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
int64_t rank2_inc_col;
int64_t rank2_inc_row;
//
// Methods
//
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams() { }
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams(TensorCoord const &extent,
Layout const &layout_,
int64_t element_sizeof_bits)
: layout(layout_)
{
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes(layout_.stride()[i], element_sizeof_bits);
stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2], element_sizeof_bits);
}
if (kBigEndian) {
// "Big Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i + 1]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]);
}
}
else {
// "Little Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]);
}
}
#if 0
//
// Debug print statements to verify extents and strides are passed correctly.
//
printf("PredicatedTileIteratorAffine::Params() entered\n");
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
printf(" extent[%d]: %d\n", i, extent[i]);
}
for (int i = 0; i < Layout::kRank; ++i) {
printf(" stride[%d]: %ld\n", i, layout_.stride()[i]);
}
printf("PredicatedTileIteratorAffine::Params() returning\n");
#endif
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams(Layout const &layout_,
int32_t threadmap_delta_kColumn,
int32_t threadmap_delta_kRow,
int64_t element_sizeof_bits)
: layout(layout_)
{
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes(layout_.stride()[i], element_sizeof_bits);
stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2], element_sizeof_bits);
}
rank2_inc_col = threadmap_delta_kColumn * stride_n[0];
rank2_inc_row = threadmap_delta_kRow * stride_m[0];
}
};
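//
// Worked example (added for illustration; not part of the original header). For an
// AffineRankN<4> layout the first kRank/2 = 2 strides describe the M modes and the last
// 2 describe the N modes. With element_sizeof_bits = 32 and element strides {s0, s1, s2, s3},
// the constructor stores stride_m = {4*s0, 4*s1} and stride_n = {4*s2, 4*s3} in bytes.
// In the little-endian scheme, divmod_m[0] = FastDivmod(extent[0]) and
// divmod_n[0] = FastDivmod(extent[2]) are then used to unflatten a linear row/column
// index into the two modes on each side.
//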
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 5,636 | C | 34.904458 | 100 | 0.584989 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_smem_accumulator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMM/CONV to store accumulator in shared memory after
applying scale, bias loaded from global memory and element-wise operations.
This Epilogue is typically used in fused GEMM/CONV to stage the intermediate accumulator.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename SmemTileIterator_, ///< Shared memory Tile iterator to output to shared memory
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename ScaleBiasIterator_, ///< Iterator to load scale and bias from global memory
typename OutputOp_ ///< Output operator
>
class EpilogueSmemAccumulator {
public:
using SmemTileIterator = SmemTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using ScaleBiasIterator = ScaleBiasIterator_;
using OutputOp = OutputOp_;
/// Fragment of accumulator tile
using FragmentAccumulator = typename AccumulatorFragmentIterator::Fragment;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Fragment of Scale and Bias loaded from global memory
using FragmentScaleBias = typename ScaleBiasIterator::Fragment;
static const bool PerChannelScale = (OutputOp::kScale ==
epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling);
/// Constructor
CUTLASS_DEVICE
EpilogueSmemAccumulator() {}
/// Streams the result to shared memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory
AccumulatorTile const &accumulator, ///< Complete warp-level accumulator tile
ScaleBiasIterator scale_iterator, ///< iterator for scale vector in global memory
ScaleBiasIterator bias_iterator) { ///< iterator for bias vector in global memory
// Fragment to load scale bias from global memory
FragmentScaleBias tb_frag_scale;
FragmentScaleBias tb_frag_bias;
/// Fragment Iterator to load slice of accumulator tile
AccumulatorFragmentIterator frag_iterator_accum(accumulator);
FragmentAccumulator tb_frag_accum;
/// Epilogue output fragment
typename SmemTileIterator::Fragment tb_frag_smem;
/// Load scale and bias from global memory
if (PerChannelScale) {
  scale_iterator.load(tb_frag_scale);
}
bias_iterator.load(tb_frag_bias);
/// Iterate over the accumulator tile and store to shared memory
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) {
CUTLASS_PRAGMA_UNROLL
for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) {
using AccumulatorAccessType = typename OutputOp::FragmentAccumulator;
using ScaleBiasAccessType = typename OutputOp::FragmentScaleBias;
using FragmentSmemAccessType = typename OutputOp::FragmentOutput;
ScaleBiasAccessType const * scale_frag_ptr =
reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_scale);
ScaleBiasAccessType const * bias_frag_ptr =
reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_bias);
FragmentSmemAccessType * smem_frag_ptr =
reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) {
frag_iterator_accum.load(tb_frag_accum);
++frag_iterator_accum;
AccumulatorAccessType const * accumulator_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum);
const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount;
CUTLASS_PRAGMA_UNROLL
for (int it = 0; it < kOutputIterations; it++) {
smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it],
scale_frag_ptr[cid * kOutputIterations + it], bias_frag_ptr[cid * kOutputIterations + it]);
}
}
smem_iterator.store(tb_frag_smem);
++smem_iterator;
}
}
}
/// Streams the result to shared memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory
AccumulatorTile const &accumulator) { ///< Complete warp-level accumulator tile
/// Fragment Iterator to load slice of accumulator tile
AccumulatorFragmentIterator frag_iterator_accum(accumulator);
FragmentAccumulator tb_frag_accum;
/// Epilogue output fragment
typename SmemTileIterator::Fragment tb_frag_smem;
/// Iterate over the accumulator tile and store to shared memory
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) {
CUTLASS_PRAGMA_UNROLL
for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) {
using AccumulatorAccessType = typename OutputOp::FragmentAccumulator;
using FragmentSmemAccessType = typename OutputOp::FragmentOutput;
FragmentSmemAccessType * smem_frag_ptr =
reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) {
frag_iterator_accum.load(tb_frag_accum);
++frag_iterator_accum;
AccumulatorAccessType const * accumulator_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum);
const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount;
CUTLASS_PRAGMA_UNROLL
for (int it = 0; it < kOutputIterations; it++) {
smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it]);
}
}
smem_iterator.store(tb_frag_smem);
++smem_iterator;
}
}
}
};
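//
// Sketch of the fused math this epilogue stages (added for illustration; the exact
// semantics are defined by the OutputOp functor supplied by the caller). With a
// per-channel scale/bias functor, each accumulator access is transformed roughly as
//
//   smem_fragment[i] = output_op(accum[i], scale[channel], bias[channel])
//                    ~ scale[channel] * accum[i] + bias[channel]
//
// before being stored to shared memory, so the staged intermediate tile already carries
// the scale, bias, and any element-wise activation.
//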
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 9,073 | C | 38.281385 | 107 | 0.661854 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op_blas3.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped complex GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_gaussian_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization and defines sensible defaults for epilogues for complex*complex case
// 4 real-valued mma operations (Complex)
// A = (ar + j ai), B = (br + j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Epilogue Shape
typename Shape_,
/// Warp-level mma operator
typename WarpMmaTensorOp_,
/// Number of k partitions
int PartitionsK,
/// Epilogue output operator
typename OutputOp_,
/// Elements accessed by inner-most loop of AccumulatorFragmentIterator::load()
int ElementsPerAccess,
/// Multiply-add operator
/// Selects between (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_ = arch::OpMultiplyAddComplex,
/// Is for a symmetric kernel
BlasMode BlasMode_ = BlasMode::kGemm
>
struct DefaultEpilogueComplexTensorOpBlas3 {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using Operator = Operator_;
static BlasMode const kBlasMode = BlasMode_;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorBlas3<
OutputTileThreadMap,
ElementOutput,
kBlasMode
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
ElementAccumulator,
LayoutC
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 0>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization and defines sensible defaults for epilogues for complex*complex case
// 3 real-valued mma operations (Gaussian Complex)
// A = (ar + j ai), B = (br +j bi), D = AB
// P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi)
// D = dr + j di = (P1 - P3) + j (P1 + P2)
/////////////////////////////////////////////////////////////////////////////////////////////////
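//
// Worked check of the 3-multiplication (Gaussian) scheme above (added for illustration):
//   P1 - P3 = (ar + ai)*br - ai*(br + bi) = ar*br + ai*br - ai*br - ai*bi = ar*br - ai*bi = dr
//   P1 + P2 = (ar + ai)*br - ar*(br - bi) = ar*br + ai*br - ar*br + ar*bi = ai*br + ar*bi = di
// so three real multiplies reproduce the usual four-multiply complex product.
//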
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
BlasMode BlasMode_
>
struct DefaultEpilogueComplexTensorOpBlas3 <Shape_, WarpMmaTensorOp_, PartitionsK,
OutputOp_, ElementsPerAccess,
arch::OpMultiplyAddGaussianComplex,
BlasMode_
> {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using Operator = arch::OpMultiplyAddGaussianComplex;
static BlasMode const kBlasMode = BlasMode_;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorBlas3<
OutputTileThreadMap,
ElementOutput,
kBlasMode
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorGaussianComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
ElementAccumulator,
LayoutC
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 0>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 9,441 | C | 34.630189 | 103 | 0.676094 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_base.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <type_traits>
#include <utility>
#endif
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
//
// This is used for metaprogramming epilogue functors. If they define
// `static bool const kIsHeavy = true;`, then the epilogue functor itself is
// not inlined. This results in smaller code and is advantageous if the epilogue
// functor consists of many instructions.
//
// If the epilogue functor does not define `kIsHeavy` or if it is `false`, then
// the behavior from CUTLASS 2.5 and before is retained. The epilogue is fully
// unrolled and inlined.
//
template<class>
struct TypeSink { typedef void type; };
template<class T> using TypeSinkT = typename TypeSink<T>::type;
template<class T, class=void> struct IsEpilogueFunctorHeavy {
static bool const value = false;
};
template<class T> struct IsEpilogueFunctorHeavy<T, TypeSinkT< decltype( T::kIsHeavy ) > > {
static bool const value = T::kIsHeavy;
};
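//
// Example of the detection idiom above (added for illustration; HeavyFunctor and
// LightFunctor are hypothetical names):
//
//   struct HeavyFunctor { static bool const kIsHeavy = true; /* ... */ };
//   struct LightFunctor { /* no kIsHeavy member */ };
//
//   static_assert(IsEpilogueFunctorHeavy<HeavyFunctor>::value, "picks up kIsHeavy");
//   static_assert(!IsEpilogueFunctorHeavy<LightFunctor>::value, "falls back to false");
//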
////////////////////////////////////////////////////////////////////////////////
/// Base class for epilogues defining warp-level
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpShape_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerIteration = 1
>
class EpilogueBase {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
static int const kPartitionsK = PartitionsK;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using Padding = Padding_;
/// Output layout is always row-major
using Layout = layout::RowMajor;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename AccumulatorTile::Element;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Use this to control the granularity of one epilogue 'iteration'
static int const kFragmentsPerIteration = FragmentsPerIteration;
public:
/// Shared storage allocation needed by the epilogue
struct SharedStorage {
//
// Type definitions
//
/// Element type of shared memory
using Element = typename WarpTileIterator::Element;
/// Tensor reference to shared memory allocation
using TensorRef = typename WarpTileIterator::TensorRef;
/// Layout of shared memory allocation
using Layout = typename WarpTileIterator::Layout;
/// Logical shape of the shared memory tile written to by all warps.
using Shape = MatrixShape<
WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK,
WarpCount::kN * WarpTileIterator::Shape::kColumn
>;
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
(Shape::kRow + Padding::kRow) * kFragmentsPerIteration,
Shape::kColumn + Padding::kColumn
>;
//
// Data members
//
AlignedBuffer<Element, StorageShape::kCount> storage;
//
// Methods
//
/// Returns a pointer to the shared memory buffer
CUTLASS_DEVICE
Element *data() {
return storage.data();
}
/// Returns a tensor reference to the shared memory buffer
CUTLASS_DEVICE
TensorRef reference() {
return TensorRef(
storage.data(),
Layout::packed({StorageShape::kRow, StorageShape::kColumn}));
}
};
protected:
//
// Data members
//
SharedStorage &shared_storage_;
/// Stores a warp's fragment of accumulators to SMEM
WarpTileIterator warp_tile_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueBase(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
shared_storage_(shared_storage),
warp_tile_iterator_(shared_storage.reference(), lane_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to three coordinates:
//
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN);
int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
int warp_m = warp_mn % WarpCount::kM;
int warp_n = warp_mn / WarpCount::kM;
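    // Worked example (illustrative): with WarpCount = <2, 2, 1> and warp_idx = 3,
    // warp_k = 3 / 4 = 0, warp_mn = 3 % 4 = 3, warp_m = 3 % 2 = 1, warp_n = 3 / 2 = 1,
    // i.e. this warp covers the lower-right warp tile of the threadblock tile.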
MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n};
warp_tile_iterator_.add_tile_offset(warp_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 8,279 | C | 33.356846 | 128 | 0.654668 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/wmma_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for half
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::half_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::half_t, ///< ElementB
LayoutB_, ///< LayoutB
ElementC_, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::half_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::half_t;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm70;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for f16 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// check supported wmma output data type for the given multiplicand data types
static_assert(
platform::is_same<cutlass::half_t, ElementC>::value || platform::is_same<float, ElementC>::value,
"Supported of wmma output data type for f16 multiplicands are: f16 and f32");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync for floating point multiplicands is available only for SM70 and beyond");
#endif
};
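//
// Usage sketch (added for illustration, assuming the translation unit is compiled for
// SM70+ so that CUTLASS_ARCH_WMMA_SM70_ENABLED is defined):
//
//   using WmmaOp = cutlass::arch::Wmma<
//       cutlass::gemm::GemmShape<16, 16, 16>,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   WmmaOp::FragmentA a;     // filled via nvcuda::wmma::load_matrix_sync
//   WmmaOp::FragmentB b;
//   WmmaOp::FragmentC c, d;
//   WmmaOp()(d, a, b, c);    // wraps nvcuda::wmma::mma_sync
//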
} // namespace arch
} // namespace cutlass
| 5,286 | C | 37.591241 | 113 | 0.615399 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/arch.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines tags for architecture-specific configurations.
*/
#pragma once
#include "cutlass/cutlass.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__))
/// Computes laneId within a warp
CUTLASS_DEVICE
int LaneId() {
int ret;
asm ("mov.u32 %0, %%laneid;" : "=r"(ret) : );
return ret;
}
/// Computes SM number the thread is running on
CUTLASS_DEVICE
int SmId() {
int ret;
asm ("mov.u32 %0, %%smid;" : "=r"(ret) : );
return ret;
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
struct Sm50 {
static int const kMinComputeCapability = 50;
};
struct Sm60 {
static int const kMinComputeCapability = 60;
};
struct Sm61 {
static int const kMinComputeCapability = 61;
};
struct Sm70 {
static int const kMinComputeCapability = 70;
};
struct Sm72 {
static int const kMinComputeCapability = 72;
};
struct Sm75 {
static int const kMinComputeCapability = 75;
};
struct Sm80 {
static int const kMinComputeCapability = 80;
};
struct Sm86 {
static int const kMinComputeCapability = 86;
};
struct Sm90 {
static int const kMinComputeCapability = 90;
};
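//
// Example use of these tags (added for illustration): kernel configurations are commonly
// guarded on the minimum compute capability carried by the tag, e.g.
//
//   static_assert(ArchTag::kMinComputeCapability >= Sm80::kMinComputeCapability,
//                 "This configuration requires SM80 or newer");
//
// where ArchTag is one of the Sm* structs above, selected at configuration time.
//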
/// Triggers a breakpoint on the device
CUTLASS_DEVICE
void device_breakpoint() {
#if defined(__CUDA_ARCH__)
asm volatile (" brkpt;\n");
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 3,538 | C | 31.768518 | 100 | 0.60684 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm50.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#include "cutlass/arch/mma.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/functional.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, float, LayoutA, float, LayoutB, float, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = float;
CUTLASS_HOST_DEVICE
void operator()(
Array<float, 1> &d,
Array<float, 1> const &a,
Array<float, 1> const &b,
Array<float, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
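//
// Usage sketch (added for illustration): these 1x1x1 specializations are plain scalar
// fused multiply-adds, e.g.
//
//   cutlass::arch::Mma<cutlass::gemm::GemmShape<1, 1, 1>, 1,
//                      float, cutlass::layout::RowMajor,
//                      float, cutlass::layout::RowMajor,
//                      float, cutlass::layout::RowMajor,
//                      cutlass::arch::OpMultiplyAdd> fma;
//   cutlass::Array<float, 1> d, a, b, c;
//   // ... fill a, b, c ...
//   fma(d, a, b, c);  // d[0] = a[0] * b[0] + c[0]
//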
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, double, LayoutA, double, LayoutB, double, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = double;
CUTLASS_HOST_DEVICE
void operator()(
Array<double, 1> &d,
Array<double, 1> const &a,
Array<double, 1> const &b,
Array<double, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, int, LayoutA, int, LayoutB, int, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = int;
CUTLASS_HOST_DEVICE
void operator()(
Array<int, 1> &d,
Array<int, 1> const &a,
Array<int, 1> const &b,
Array<int, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<float>,
LayoutA,
float,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<float, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0] + c[0].real();
d[0].imag() = a[0].imag() * b[0] + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
float,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<float, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0] * b[0].real() + c[0].real();
d[0].imag() = a[0] * b[0].imag() + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<double>,
LayoutA,
complex<double>,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<complex<double>, 1> const &a,
Array<complex<double>, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
}
};
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<double>,
LayoutA,
double,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<complex<double>, 1> const &a,
Array<double, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0].real() * b[0] + c[0].real();
d[0].imag() = a[0].imag() * b[0] + c[0].imag();
}
};
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
double,
LayoutA,
complex<double>,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<double, 1> const &a,
Array<complex<double>, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0] * b[0].real() + c[0].real();
d[0].imag() = a[0] * b[0].imag() + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, half_t, LayoutA, half_t, LayoutB, float, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = float;
CUTLASS_HOST_DEVICE
void operator()(
Array<float, 1> &d,
Array<half_t, 1> const &a,
Array<half_t, 1> const &b,
Array<float, 1> const &c
) {
d[0] = float(a[0]) * float(b[0]) + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation for Quaternions
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, Quaternion<float>, LayoutA, Quaternion<float>, LayoutB, Quaternion<float>, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using Element = Quaternion<float>;
using ElementC = Element;
CUTLASS_HOST_DEVICE
void operator()(
Array<Element, 1> &d,
Array<Element, 1> const &a,
Array<Element, 1> const &b,
Array<Element, 1> const &c
) {
multiply_add<Element, Element, Element> op;
d[0] = op(a[0], b[0], c[0]);
}
};
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,096 | C | 24.628175 | 140 | 0.567502 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm75.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply for SM75
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
// CUDA Toolkit includes for nvcuda::wmma needed for binarized matrix multiply.
#include <mma.h>
#include "cutlass/wmma_array.h"
#endif
// CUTLASS includes
#include "cutlass/arch/mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))
#define CUTLASS_ARCH_MMA_SM75_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750))
#define CUTLASS_ARCH_MMA_SM75_ENABLED
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - FP16 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<16, 8, 8>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 2>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0,%1}, {%2,%3}, {%4}, {%5,%6};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<16, 8, 8>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 2>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]),
"r"(B[0]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
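// Usage sketch for the specialization above (a minimal illustration, assuming the
// per-thread fragments have already been populated by CUTLASS warp-level iterators;
// the helper name example_mma_16x8x8 is hypothetical, not part of the library):
//
//   using Mma1688 = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<16, 8, 8>, 32,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   __device__ void example_mma_16x8x8(Mma1688::FragmentC &accum,
//                                      Mma1688::FragmentA const &frag_A,
//                                      Mma1688::FragmentB const &frag_B) {
//     Mma1688 mma;
//     mma(accum, frag_A, frag_B, accum);  // accum = A * B + accum, warp-wide
//   }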
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply .8816 (8b)
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
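// Note on operand packing (a sketch of the register layout the inline PTX above
// relies on): FragmentA and FragmentB each hold four 8-bit values, which occupy
// exactly one 32-bit register, so each fragment is reinterpreted as a single
// `unsigned`. Packing bytes {a0, a1, a2, a3} by hand would look like:
//
//   uint32_t packed = uint32_t(uint8_t(a0))
//                   | (uint32_t(uint8_t(a1)) << 8)
//                   | (uint32_t(uint8_t(a2)) << 16)
//                   | (uint32_t(uint8_t(a3)) << 24);
//
// which matches the little-endian layout produced by the reinterpret_cast.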
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s8.u8 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (8b) with SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.s8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,16>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.u8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,16>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.s8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,16>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.u8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (4b)
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
int4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
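// Note on sub-byte packing (mirroring the 8-bit case above): FragmentA and
// FragmentB each hold eight 4-bit values, so a fragment again occupies one
// 32-bit register, with element k stored in bits [4*k, 4*k + 4). A hypothetical
// helper that recovers element k as a signed integer:
//
//   __device__ int extract_int4(uint32_t packed, int k) {
//     return static_cast<int>(packed << (28 - 4 * k)) >> 28;  // sign-extend the 4-bit field
//   }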
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
uint4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
int4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
uint4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (4b) - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
int4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
uint4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("_mma.m8n8k32.row.col.u4.s4.sat {%0,%1}, %2, %3, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
int4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.s4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
uint4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.u4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// b1 ^ b1 + s32 => s32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma<
gemm::GemmShape<8,8,128>,
32,
uint1b_t,
layout::RowMajor,
uint1b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpXorPopc> {
using Shape = gemm::GemmShape<8,8,128>;
using ElementA = uint1b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint1b_t, 32>;
using ElementB = uint1b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint1b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpXorPopc;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
#if (__CUDA_ARCH__ >= 900) || (defined(CUTLASS_ARCH_WMMA_ENABLED))
using WmmaFragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
nvcuda::wmma::experimental::precision::b1,
nvcuda::wmma::row_major>;
using WmmaFragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
nvcuda::wmma::experimental::precision::b1,
nvcuda::wmma::col_major>;
using WmmaFragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
int>;
WmmaFragmentA const & A = reinterpret_cast<WmmaFragmentA const &>(a);
WmmaFragmentB const & B = reinterpret_cast<WmmaFragmentB const &>(b);
WmmaFragmentC const & C = reinterpret_cast<WmmaFragmentC const &>(c);
WmmaFragmentC & D = reinterpret_cast<WmmaFragmentC &>(d);
nvcuda::wmma::bmma_sync(D, A, B, C, nvcuda::wmma::experimental::bmmaBitOpXOR,
nvcuda::wmma::experimental::bmmaAccumulateOpPOPC);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0); // WMMA must be supported to issue binary matrix multiply-accumulate instructions.
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
#endif
}
};
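// The XOR-POPC operation above accumulates, for each output element, the number
// of mismatching bit positions between a 128-bit row of A and a 128-bit column
// of B. A scalar sketch of the same per-element reduction (names illustrative only):
//
//   __device__ int xor_popc_dot(uint32_t const a[4], uint32_t const b[4], int c) {
//     int d = c;
//     for (int i = 0; i < 4; ++i) {
//       d += __popc(a[i] ^ b[i]);  // 4 x 32 bits = Shape::kK = 128
//     }
//     return d;
//   }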
////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 31,652 | C | 23.31106 | 113 | 0.601352 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm60.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#include <cuda_fp16.h>
#include "cutlass/arch/mma.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutA, typename LayoutB, typename LayoutC>
struct Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 2> const &a,
Array<half_t, 1> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 B = __half2half2(reinterpret_cast<__half const &>(b));
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> &>(D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[i] * b[0] + c[i];
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutA, typename LayoutB>
struct Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 1> const &a,
Array<half_t, 2> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = __half2half2(reinterpret_cast<__half const &>(a));
__half2 B = reinterpret_cast<__half2 const &>(b);
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> &>(D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[0] * b[i] + c[i];
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma <
gemm::GemmShape<2, 2, 1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 4> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 4> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 Blo = __low2half2(reinterpret_cast<__half2 const &>(b));
__half2 Bhi = __high2half2(reinterpret_cast<__half2 const &>(b));
__half2 const *C = reinterpret_cast<__half2 const *>(&c);
__half2 Dlo = __hfma2(A, Blo, C[0]);
__half2 Dhi = __hfma2(A, Bhi, C[1]);
Array<half_t, 2> * D = reinterpret_cast<Array<half_t, 2> *>(&d);
D[0] = reinterpret_cast<Array<half_t, 2> const &>(Dlo);
D[1] = reinterpret_cast<Array<half_t, 2> const &>(Dhi);
#else
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < 2; ++j) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i + 2 * j] = a[i] * b[j] + c[i + 2 * j];
}
}
#endif
}
};
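// Layout note for the specialization above: with a column-major 2x2 accumulator,
// d = {d(0,0), d(1,0), d(0,1), d(1,1)}, so C[0] and C[1] are the two columns
// viewed as __half2 pairs and each __hfma2 produces one column. For example, the
// (1,1) element is d[3] = a[1] * b[1] + c[3], i.e. the high half of Dhi.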
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma<
gemm::GemmShape<2, 2, 1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 4> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 4> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 Alo = __low2half2(reinterpret_cast<__half2 const &>(a));
__half2 Ahi = __high2half2(reinterpret_cast<__half2 const &>(a));
__half2 const & B = reinterpret_cast<__half2 const &>(b);
__half2 const *C = reinterpret_cast<__half2 const *>(&c);
__half2 Dlo = __hfma2(Alo, B, C[0]);
    __half2 Dhi = __hfma2(Ahi, B, C[1]);
Array<half_t, 2> * D = reinterpret_cast<Array<half_t, 2> *>(&d);
D[0] = reinterpret_cast<Array<half_t, 2> &>(Dlo);
D[1] = reinterpret_cast<Array<half_t, 2> &>(Dhi);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < 2; ++j) {
d[i * 2 + j] = a[i] * b[j] + c[i * 2 + j];
}
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
| 7,040 | C | 26.830039 | 100 | 0.553835 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/arch.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the operation implied by MMA.
struct OpMultiplyAdd;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the result is saturated to MAX_FLOAT|MIN_FLOAT or MAX_INT|MIN_INT
struct OpMultiplyAddSaturate;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to a narrower type (BF16)
struct OpMultiplyAddFastBF16;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to a narrower type (F16)
struct OpMultiplyAddFastF16;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to 2 (big and small) TF32 components
// Perform 3xTF32 or 4xTF32 for every F32 output element
struct OpMultiplyAddFastF32;
/// Tag indicating the input is converted to 2 (big and small) TF32 components
// Perform 3xTF32 or 4xTF32 for every complex<F32> output element
struct OpMultiplyAddComplexFastF32;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the complex multiply-add operation
struct OpMultiplyAddComplex;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the gaussian complex multiply-add operation
struct OpMultiplyAddGaussianComplex;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the inner product is defined by (XOR, POPC)
struct OpXorPopc;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying math operators as thread-level operations.
struct OpClassSimt;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as Tensor Core operations.
struct OpClassTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as WMMA Tensor Core operations
struct OpClassWmmaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator
>
struct Mma;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - specialized for 1x1x1x1 matrix multiply operation
template <
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator_
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, ElementA, LayoutA, ElementB, LayoutB, ElementC_, LayoutC, Operator_> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = Operator_;
using ElementC = ElementC_;
CUTLASS_HOST_DEVICE
void operator()(
Array<ElementC, 1> &d,
Array<ElementA, 1> const &a,
Array<ElementB, 1> const &b,
Array<ElementC, 1> const &c
) {
multiply_add<ElementA, ElementB, ElementC> op;
d[0] = op(a[0], b[0], c[0]);
}
};
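// A minimal sketch of using this scalar fall-back directly (the helper name
// fma_once is hypothetical; any element types with a multiply_add functor
// defined can be substituted):
//
//   using ScalarMma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<1, 1, 1>, 1,
//       float, cutlass::layout::RowMajor,
//       float, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   CUTLASS_HOST_DEVICE float fma_once(float a, float b, float c) {
//     cutlass::Array<float, 1> d, A, B, C;
//     A[0] = a; B[0] = b; C[0] = c;
//     ScalarMma mma;
//     mma(d, A, B, C);
//     return d[0];  // a * b + c
//   }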
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specifies internal data type for computation
struct SPFormatType {
enum Kind {
Thread
};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator,
/// Specifies meta data format
SPFormatType::Kind SPFormat = SPFormatType::Thread
>
struct SparseMma;
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations for each compute capability
//
#include "cutlass/arch/mma_sm50.h"
#include "cutlass/arch/mma_sm60.h"
#include "cutlass/arch/mma_sm61.h"
#include "cutlass/arch/mma_sm70.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sparse_sm80.h"
#include "cutlass/arch/mma_sm90.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,037 | C | 34.100437 | 110 | 0.549956 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sparse_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Sparse matrix multiply accumulate for SM80
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))
#define CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
#define CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED
#endif
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16832
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct SparseMma<
gemm::GemmShape<16, 8, 32>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread
> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 8>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
uint32_t const *C = reinterpret_cast<uint32_t const *>(&c);
uint32_t *D = reinterpret_cast<uint32_t *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, "
"{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x0;\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E));
}
else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, "
"{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x1;\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
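// Call sketch for the sparse specialization above (fragment and metadata packing
// are normally produced by CUTLASS warp-level iterators; the helper name is
// hypothetical). kSparse = 2 reflects 2:4 structured sparsity in A, so FragmentA
// carries only the kept half of the A tile, E holds the packed 2-bit metadata
// indices of the kept elements, and id2 selects which metadata thread group
// (0 or 1, per kMaxID2) the instruction reads.
//
//   using SpMma = cutlass::arch::SparseMma<
//       cutlass::gemm::GemmShape<16, 8, 32>, 32,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd, cutlass::arch::SPFormatType::Thread>;
//
//   __device__ void example_sparse_mma(SpMma::FragmentC &accum,
//                                      SpMma::FragmentA const &frag_A,
//                                      SpMma::FragmentB const &frag_B,
//                                      uint32_t meta_E) {
//     SpMma mma;
//     mma(accum, frag_A, frag_B, accum, meta_E, /*id2=*/0);
//   }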
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct SparseMma<
gemm::GemmShape<16, 8, 32>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread
> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]),
"r"(E));
}
else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]),
"r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16832 - Float BF16, FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = bf16 * bf16 + F32
template <>
struct SparseMma<gemm::GemmShape<16, 8, 32>, 32, bfloat16_t, layout::RowMajor,
bfloat16_t, layout::ColumnMajor, float, layout::RowMajor,
OpMultiplyAdd, SPFormatType::Thread> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = bfloat16_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<bfloat16_t, 8>;
using ElementB = bfloat16_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<bfloat16_t, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16816 - Float TF32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = tf32 * tf32 + F32
template <>
struct SparseMma<gemm::GemmShape<16, 8, 16>, 32, tfloat32_t, layout::RowMajor,
tfloat32_t, layout::ColumnMajor, float, layout::RowMajor,
OpMultiplyAdd, SPFormatType::Thread> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = tfloat32_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<tfloat32_t, 4>;
using ElementB = tfloat32_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<tfloat32_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 4;
static int const kMaxID2 = 2;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16864 - S8 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16864 (m16n8k64) - S8 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 168128 (m16n8k128) - S4 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
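//
// Illustrative note (not part of the library): usage mirrors the int8 m16n8k64
// sketch earlier in this file; only the shape and fragment types change. For
// the m16n8k128 int4 specializations, each operand fragment packs 32 sub-byte
// elements (16 bytes, i.e. four 32-bit registers per thread):
//
//   using SpMma = cutlass::arch::SparseMma<
//       cutlass::gemm::GemmShape<16, 8, 128>, 32,
//       cutlass::int4b_t, cutlass::layout::RowMajor,
//       cutlass::int4b_t, cutlass::layout::ColumnMajor,
//       int, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd,
//       cutlass::arch::SPFormatType::Thread>;
//
//   SpMma::FragmentA frag_A;   // Array<cutlass::int4b_t, 32>
//   SpMma::FragmentC accum;    // Array<int, 4>
//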
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 168128 (m16n8k128) - S4 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 43,978 | C | 25.084816 | 102 | 0.560576 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm90.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Matrix multiply-add operations targeting SM90 Tensor Cores
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 8))
#define CUTLASS_ARCH_MMA_SM90_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
#define CUTLASS_ARCH_MMA_SM90_ENABLED
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
/// Matrix Multiply-Add 16x8x4 fp64
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F64 = F64 * F64 + F64
template <>
struct Mma<
gemm::GemmShape<16,8,4>,
32,
double,
layout::RowMajor,
double,
layout::ColumnMajor,
double,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,4>;
using ElementA = double;
using LayoutA = layout::RowMajor;
using FragmentA = Array<double, 2>;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<double, 1>;
using ElementC = double;
using LayoutC = layout::RowMajor;
using FragmentC = Array<double, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm90;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM90_ENABLED)
double const *A = reinterpret_cast<double const *>(&a);
double const *B = reinterpret_cast<double const *>(&b);
double const *C = reinterpret_cast<double const *>(&c);
double *D = reinterpret_cast<double *>(&d);
asm volatile("mma.sync.aligned.m16n8k4.row.col.f64.f64.f64.f64 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n"
: "=d"(D[0]), "=d"(D[1]), "=d"(D[2]), "=d"(D[3])
: "d"(A[0]), "d"(A[1]),
"d"(B[0]),
"d"(C[0]), "d"(C[1]), "d"(C[2]), "d"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
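//
// Illustrative usage sketch (not part of the library): one warp issues the
// m16n8k4 FP64 tensor core MMA defined above, with each thread supplying its
// per-thread fragment registers.
//
//   using Mma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<16, 8, 4>, 32,
//       double, cutlass::layout::RowMajor,
//       double, cutlass::layout::ColumnMajor,
//       double, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   Mma::FragmentA a;   // Array<double, 2> per thread
//   Mma::FragmentB b;   // Array<double, 1> per thread
//   Mma::FragmentC c;   // Array<double, 4> per-thread accumulators
//
//   Mma mma;
//   mma(c, a, b, c);    // c = a * b + c
//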
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4,430 | C | 32.568182 | 120 | 0.575621 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Matrix multiply-add operations targeting SM70 Tensor Cores
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))
#define CUTLASS_ARCH_MMA_SM70_SUPPORTED
#endif
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700))
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 &&__CUDACC_VER_MINOR__ >= 1))
#define CUTLASS_ARCH_MMA_SM70_ENABLED
#endif
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Matrix multiply accumulate 884 (m8n8k4) - FP16 accumulation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<8,8,4>,
8,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::ColumnMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.col.col.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
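//
// Illustrative usage sketch (not part of the library): the Volta mma.sync
// m8n8k4 operation above is issued cooperatively by a group of 8 threads
// (the second template argument), each contributing 4-element fragments.
//
//   using Mma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<8, 8, 4>, 8,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   Mma::FragmentA a;   // Array<half_t, 4>
//   Mma::FragmentB b;   // Array<half_t, 4>
//   Mma::FragmentC c;   // Array<half_t, 8> accumulators
//
//   Mma mma;
//   mma(c, a, b, c);    // c = a * b + c
//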
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::ColumnMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::RowMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.col.row.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.col.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::RowMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.row.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Matrix multiply accumulate 884 (m8n8k4) - FP32 accumulation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::ColumnMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
/// Multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.col.col.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, "
"{%12,%13,%14,%15,%16,%17,%18,%19};\n"
: "=f"(D[0]),
"=f"(D[1]),
"=f"(D[2]),
"=f"(D[3]),
"=f"(D[4]),
"=f"(D[5]),
"=f"(D[6]),
"=f"(D[7])
: "r"(A[0]),
"r"(A[1]),
"r"(B[0]),
"r"(B[1]),
"f"(C[0]),
"f"(C[1]),
"f"(C[2]),
"f"(C[3]),
"f"(C[4]),
"f"(C[5]),
"f"(C[6]),
"f"(C[7])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::ColumnMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::RowMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
/// Multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.col.row.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, "
"{%12,%13,%14,%15,%16,%17,%18,%19};\n"
: "=f"(D[0]),
"=f"(D[1]),
"=f"(D[2]),
"=f"(D[3]),
"=f"(D[4]),
"=f"(D[5]),
"=f"(D[6]),
"=f"(D[7])
: "r"(A[0]),
"r"(A[1]),
"r"(B[0]),
"r"(B[1]),
"f"(C[0]),
"f"(C[1]),
"f"(C[2]),
"f"(C[3]),
"f"(C[4]),
"f"(C[5]),
"f"(C[6]),
"f"(C[7])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
/// Multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.col.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, "
"{%12,%13,%14,%15,%16,%17,%18,%19};\n"
: "=f"(D[0]),
"=f"(D[1]),
"=f"(D[2]),
"=f"(D[3]),
"=f"(D[4]),
"=f"(D[5]),
"=f"(D[6]),
"=f"(D[7])
: "r"(A[0]),
"r"(A[1]),
"r"(B[0]),
"r"(B[1]),
"f"(C[0]),
"f"(C[1]),
"f"(C[2]),
"f"(C[3]),
"f"(C[4]),
"f"(C[5]),
"f"(C[6]),
"f"(C[7])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::RowMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
/// Multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.row.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, "
"{%12,%13,%14,%15,%16,%17,%18,%19};\n"
: "=f"(D[0]),
"=f"(D[1]),
"=f"(D[2]),
"=f"(D[3]),
"=f"(D[4]),
"=f"(D[5]),
"=f"(D[6]),
"=f"(D[7])
: "r"(A[0]),
"r"(A[1]),
"r"(B[0]),
"r"(B[1]),
"f"(C[0]),
"f"(C[1]),
"f"(C[2]),
"f"(C[3]),
"f"(C[4]),
"f"(C[5]),
"f"(C[6]),
"f"(C[7])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation specialized for the entire warp
template <
typename LayoutA,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename Operator
>
struct Mma<
gemm::GemmShape<16, 16, 4>,
32,
half_t,
LayoutA,
half_t,
LayoutB,
ElementC,
LayoutC,
Operator
> :
public Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
LayoutA,
half_t,
LayoutB,
ElementC,
LayoutC,
Operator> {
using Shape = gemm::GemmShape<16, 16, 4>;
};
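//
// Illustrative note (not part of the library): the warp-wide 16x16x4 shape is a
// re-labelling of the 8x8x4 operation above; the fragments and operator() are
// inherited unchanged. For example,
//
//   using WarpMma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<16, 16, 4>, 32,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
// derives from the 8x8x4 row.col FP32-accumulation specialization defined
// earlier in this file.
//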
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 16,554 | C | 23.857357 | 118 | 0.552072 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/simd.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing SIMD operators
*/
#pragma once
#include "../array.h"
#include "../numeric_types.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Element-wise operators
//
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(Array<T, N> const &a, Array<T, N> const &b) {
Array<T, N> d;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
d[i] = a[i] * b[i];
}
return d;
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator+(Array<T, N> const &a, Array<T, N> const &b) {
Array<T, N> d;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
d[i] = a[i] + b[i];
}
return d;
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator-(Array<T, N> const &a, Array<T, N> const &b) {
Array<T, N> d;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
d[i] = a[i] - b[i];
}
return d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Multiply-accumulate operators
//
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> mac(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) {
Array<T, N> d;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
d[i] = a[i] * b[i] + c[i];
}
return d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Dot product operator
//
template <typename Element, typename Accumulator, int N>
CUTLASS_HOST_DEVICE
Accumulator dot(Array<Element, N> const &a, Array<Element, N> const &b, Accumulator accum) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
accum += a[i] * b[i];
}
return accum;
}
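//
// Illustrative usage sketch (not part of the library): these helpers operate
// element-wise on cutlass::Array. They live in namespace cutlass::arch, so
// they must be qualified or made visible at the call site.
//
//   using namespace cutlass::arch;
//
//   cutlass::Array<float, 4> a, b, c;
//   a.fill(1.0f); b.fill(2.0f); c.fill(0.5f);
//
//   cutlass::Array<float, 4> sum = a + b;         // sum[i] = a[i] + b[i]
//   cutlass::Array<float, 4> fma = mac(a, b, c);  // fma[i] = a[i] * b[i] + c[i]
//   float r = dot(a, b, 0.0f);                    // r = sum over i of a[i] * b[i]
//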
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "simd_sm60.h"
#include "simd_sm61.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| 3,998 | C | 30.738095 | 100 | 0.550025 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/memory_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Architecture-specific memory operations added for SM80
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/cache_operation.h"
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
#define CUDA_CP_ASYNC_ACTIVATED 1
#else
#define CUDA_CP_ASYNC_ACTIVATED 0
#endif
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Initiates an asynchronous copy from global memory to shared memory.
///
/// LDGSTS
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async;
/// Initiates an asynchronous copy from global memory to shared memory. Rather than predicating
/// the entire transfer, zeros are written to SMEM if the guard predicate is false.
///
/// LDGSTS
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async_zfill;
/// Initiates an asynchronous copy from global memory to shared memory. Rather than predicating
/// the entire transfer, NaNs (0x7eff) are written to SMEM if the guard predicate is false.
///
/// LDGSTS
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async_nan;
/// Either 0 or 1 is written to SMEM, depending on the input element type.
/// Used for the diagonal elements of triangular matrices in BLAS3 functions.
///
/// STS
///
template <
/// Type of Element
typename Element,
/// If the data is for a Hermitian matrix diagonal
bool IsHermitianData = false>
struct cp_async_diag;
static const uint32_t OOB_NAN_F16 = 0x7eff;
static const uint32_t OOB_NAN_F16x2 = ((OOB_NAN_F16 << 16) | OOB_NAN_F16);
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async<SizeInBytes, CacheOperation::Always> {
/// Copy
CUTLASS_DEVICE
cp_async(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
// Make sure the size is supported.
static_assert((SizeInBytes == 4 || SizeInBytes == 8 || SizeInBytes == 16),
"Size is not supported");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.ca.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.ca.shared.global [%1], [%2], %3;\n"
#endif
"}\n" ::"r"((int)pred_guard),
"r"(smem_int_ptr), "l"(global_ptr), "n"(SizeInBytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
#endif
}
};
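//
// Illustrative usage sketch (not part of the library): each thread initiates a
// guarded 16-byte asynchronous global-to-shared copy. cp_async only initiates
// the transfer; completion is ordered with CUTLASS's cp_async_fence() /
// cp_async_wait<N>() helpers.
//
//   __shared__ __align__(16) uint8_t smem[16 * 128];
//
//   void const *gmem_ptr = ...;                    // 16B-aligned global address
//   void *smem_ptr = smem + 16 * threadIdx.x;
//   bool guard = ...;                              // false => the copy is skipped
//
//   cutlass::arch::cp_async<16, cutlass::arch::CacheOperation::Always>(
//       smem_ptr, gmem_ptr, guard);
//
//   cutlass::arch::cp_async_fence();               // commit the batch
//   cutlass::arch::cp_async_wait<0>();             // wait for all committed batches
//   __syncthreads();
//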
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async_zfill<SizeInBytes, CacheOperation::Always> {
/// Copy with zero fill
CUTLASS_DEVICE
cp_async_zfill(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
// Make sure the size is supported.
static_assert((SizeInBytes == 4 || SizeInBytes == 8 || SizeInBytes == 16),
"Size is not supported");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
#if CUTLASS_ENABLE_L2_PREFETCH
"cp.async.ca.shared.global.L2::128B [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#else
"cp.async.ca.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#endif
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
else {
AccessType zeros;
zeros.clear();
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
};
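//
// Illustrative usage sketch (not part of the library): cp_async_zfill is the
// variant typically used for residue tiles, where out-of-bounds accesses must
// still deposit well-defined (zero) data into shared memory.
//
//   bool in_bounds = ...;   // predicate computed from the tile coordinates
//   cutlass::arch::cp_async_zfill<16>(smem_ptr, gmem_ptr, in_bounds);
//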
/// Partial specialization
template <>
struct cp_async_nan<16, CacheOperation::Always> {
static int const kSizeInBytes = 16;
/// Copy with nan fill
CUTLASS_DEVICE
cp_async_nan(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
static __constant__ uint4 OOB_NAN_F16x8 = {OOB_NAN_F16x2, OOB_NAN_F16x2,
OOB_NAN_F16x2, OOB_NAN_F16x2};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.ca.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.ca.shared.global [%1], [%2], %3;\n"
#endif
" @!p st.shared.v4.u32 [%1], {%4, %5, %6, %7};\n"
"}\n"
:
: "r"((int)pred_guard), "r"(smem_int_ptr), "l"(global_ptr),
"n"(kSizeInBytes), "r"(OOB_NAN_F16x8.x), "r"(OOB_NAN_F16x8.y), "r"(OOB_NAN_F16x8.z),
"r"(OOB_NAN_F16x8.w));
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_UNUSED(global_ptr);
CUTLASS_UNUSED(pred_guard);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
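//
// Illustrative note (not part of the library): cp_async_nan follows the same
// calling convention as cp_async_zfill, but a predicated-off access deposits
// the half-precision NaN pattern 0x7eff instead of zeros.
//
//   cutlass::arch::cp_async_nan<16>(smem_ptr, gmem_ptr, guard);
//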
/// Partial specialization to write one (1)
template<typename Element_>
struct cp_async_diag <Element_, false> {
using Element = Element_;
CUTLASS_DEVICE
cp_async_diag(void *smem_ptr) {
#if CUDA_CP_ASYNC_ACTIVATED
/// Values for the diagonal elements of the triangular input matrix
static __constant__ uint2 DIAG_DATA_DOUBLE_ONE = {0x3ff00000, 0x00000000};
static __constant__ uint1 DIAG_DATA_FLOAT_ONE = {0x3f800000};
static __constant__ uint1 DIAG_DATA_ZERO = {0x00000000};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
if (platform::is_same<Element, complex<double>>::value) {
asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_DOUBLE_ONE.y), "r"(DIAG_DATA_DOUBLE_ONE.x),
"r"(DIAG_DATA_ZERO.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, complex<float>>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_FLOAT_ONE.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, double>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_DOUBLE_ONE.y),"r"(DIAG_DATA_DOUBLE_ONE.x));
} else if (platform::is_same<Element, float>::value) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_FLOAT_ONE.x));
} else {
CUTLASS_UNUSED(smem_int_ptr);
CUTLASS_NOT_IMPLEMENTED();
}
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Partial specialization to write zero for the imaginary part of Hermitian data
template<typename Element_>
struct cp_async_diag <Element_, true> {
using Element = Element_;
CUTLASS_DEVICE
cp_async_diag(void *smem_ptr) {
#if CUDA_CP_ASYNC_ACTIVATED
/// Values for the diagonal elements of the triangular input matrix
static __constant__ uint1 DIAG_DATA_ZERO = {0x00000000};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
if (platform::is_same<Element, complex<double>>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_ZERO.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, complex<float>>::value) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_ZERO.x));
} else {
CUTLASS_UNUSED(smem_int_ptr);
CUTLASS_NOT_IMPLEMENTED();
}
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async<SizeInBytes, CacheOperation::Global> {
/// Copy
CUTLASS_DEVICE
cp_async(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
static_assert(SizeInBytes == 16,
"cp.async only supports CacheOperation::Global when access size is 16B.");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.cg.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.cg.shared.global [%1], [%2], %3;\n"
#endif
"}\n" ::"r"((int)pred_guard),
"r"(smem_int_ptr), "l"(global_ptr), "n"(SizeInBytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
#endif
}
};
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async_zfill<SizeInBytes, CacheOperation::Global> {
/// Copy with zero fill
CUTLASS_DEVICE
cp_async_zfill(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
static_assert(SizeInBytes == 16,
"cp.async only supports CacheOperation::Global when access size is 16B.");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
#if CUTLASS_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#else
"cp.async.cg.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#endif
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
else {
AccessType zeros;
zeros.clear();
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
};
/// Partial specialization
template <>
struct cp_async_nan<16, CacheOperation::Global> {
static int const kSizeInBytes = 16;
/// Copy with nan fill
CUTLASS_DEVICE
cp_async_nan(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
static __constant__ uint4 OOB_NAN_F16x8 = {OOB_NAN_F16x2, OOB_NAN_F16x2,
OOB_NAN_F16x2, OOB_NAN_F16x2};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.cg.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.cg.shared.global [%1], [%2], %3;\n"
#endif
" @!p st.shared.v4.u32 [%1], {%4, %5, %6, %7};\n"
"}\n"
:
: "r"((int)pred_guard), "r"(smem_int_ptr), "l"(global_ptr),
"n"(kSizeInBytes), "r"(OOB_NAN_F16x8.x), "r"(OOB_NAN_F16x8.y), "r"(OOB_NAN_F16x8.z),
"r"(OOB_NAN_F16x8.w));
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_UNUSED(global_ptr);
CUTLASS_UNUSED(pred_guard);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Establishes an ordering w.r.t previously issued cp.async instructions. Does not block.
CUTLASS_DEVICE
void cp_async_fence() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.commit_group;\n" ::);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Blocks until all but <N> previous cp.async.commit_group operations have committed.
template <int N>
CUTLASS_DEVICE void cp_async_wait() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.wait_group %0;\n" ::"n"(N));
#endif
}
/// Blocks until all previous cp.async.commit_group operations have committed.
template <>
CUTLASS_DEVICE void cp_async_wait<0>() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.wait_all;\n" ::);
#endif
}
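////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (not part of the original CUTLASS header): a minimal single-stage copy that
// chains the primitives defined above. The function name and the fixed 16B access size are
// assumptions chosen for the example; production kernels issue many cp_async calls per stage and
// overlap several stages before waiting.
//
CUTLASS_DEVICE
void cp_async_single_stage_example(void *smem_ptr, void const *global_ptr, bool pred_guard) {
  // Issue an asynchronous 16B global->shared copy; shared memory is zero-filled when the
  // predicate is false.
  cp_async_zfill<16, CacheOperation::Global>(smem_ptr, global_ptr, pred_guard);
  // Commit all cp.async instructions issued so far into one group ...
  cp_async_fence();
  // ... and block until that group has completed before the copied data is consumed.
  cp_async_wait<0>();
}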
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,154 | C | 31.45182 | 100 | 0.57602 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/wmma_sm72.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for int8_t
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
int8_t, ///< ElementA
LayoutA_, ///< LayoutA
int8_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED)
using Shape = Shape_;
using ElementA = int8_t;
using LayoutA = LayoutA_;
using ElementB = int8_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm72;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for s8 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync interger type multiplicands is avialable only for SM72 and beyond");
#endif
};
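////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (not part of the original CUTLASS header): how a warp might drive the
// int8_t specialization above for a single 16x16x16 tile, assuming this file is included via
// cutlass/arch/wmma.h as usual. The raw pointers, the leading dimension of 16, and the function
// name are assumptions chosen to keep the example self-contained.
//
#if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED)
CUTLASS_DEVICE
void wmma_s8_16x16x16_example(
    int32_t *ptr_D, int8_t const *ptr_A, int8_t const *ptr_B, int32_t const *ptr_C) {
  using WmmaS8 = Wmma<
      cutlass::gemm::GemmShape<16, 16, 16>,
      int8_t, layout::RowMajor,
      int8_t, layout::ColumnMajor,
      int32_t, layout::RowMajor,
      cutlass::arch::OpMultiplyAdd>;

  typename WmmaS8::FragmentA frag_A;
  typename WmmaS8::FragmentB frag_B;
  typename WmmaS8::FragmentC frag_C;

  // Load both operands and the accumulator for one 16x16x16 tile (leading dimension 16).
  nvcuda::wmma::load_matrix_sync(frag_A, ptr_A, 16);
  nvcuda::wmma::load_matrix_sync(frag_B, ptr_B, 16);
  nvcuda::wmma::load_matrix_sync(frag_C, ptr_C, 16, nvcuda::wmma::mem_row_major);

  // D = A * B + C, computed cooperatively by the full warp.
  WmmaS8 wmma_op;
  wmma_op(frag_C, frag_A, frag_B, frag_C);

  nvcuda::wmma::store_matrix_sync(ptr_D, frag_C, 16, nvcuda::wmma::mem_row_major);
}
#endif // defined(CUTLASS_ARCH_WMMA_SM72_ENABLED)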
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for uint8_t
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
uint8_t, ///< ElementA
LayoutA_, ///< LayoutA
uint8_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED)
using Shape = Shape_;
using ElementA = uint8_t;
using LayoutA = LayoutA_;
using ElementB = uint8_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm72;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for u8 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync interger type multiplicands is avialable only for SM72 and beyond");
#endif
};
} // namespace arch
} // namespace cutlass
| 7,746 | C | 35.71564 | 108 | 0.5874 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))
#define CUTLASS_ARCH_MMA_SM80_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
#define CUTLASS_ARCH_MMA_SM80_ENABLED
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - Float BF16, FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - F32 = bf16 * bf16 + F32
template <>
struct Mma<
gemm::GemmShape<16, 8, 8>,
32,
bfloat16_t,
layout::RowMajor,
bfloat16_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = bfloat16_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<bfloat16_t, 4>;
using ElementB = bfloat16_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<bfloat16_t, 2>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm(
"mma.sync.aligned.m16n8k8.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]),
"r"(B[0]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1684 - Float TF32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = tf32 * tf32 + F32
template <>
struct Mma<
gemm::GemmShape<16, 8, 4>,
32,
tfloat32_t,
layout::RowMajor,
tfloat32_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 4>;
using ElementA = tfloat32_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<tfloat32_t, 2>;
using ElementB = tfloat32_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<tfloat32_t, 1>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k4.row.col.f32.tf32.tf32.f32 {%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]),
"r"(B[0]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - Float TF32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = tf32 * tf32 + F32
template <>
struct Mma<gemm::GemmShape<16, 8, 8>, 32, tfloat32_t, layout::RowMajor,
tfloat32_t, layout::ColumnMajor, float, layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = tfloat32_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<tfloat32_t, 4>;
using ElementB = tfloat32_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<tfloat32_t, 2>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16816
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<16, 8, 16>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
uint32_t const *C = reinterpret_cast<uint32_t const *>(&c);
uint32_t *D = reinterpret_cast<uint32_t *>(&d);
asm volatile("mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 {%0,%1}, {%2,%3,%4,%5}, {%6,%7}, {%8,%9};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]),
"r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1])
);
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = bf16 * bf16 + F32
template <>
struct Mma<
gemm::GemmShape<16, 8, 16>,
32,
bfloat16_t,
layout::RowMajor,
bfloat16_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = bfloat16_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<bfloat16_t, 8>;
using ElementB = bfloat16_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<bfloat16_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<16, 8, 16>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, "
"{%10,%11,%12,%13};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
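////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (not part of the original CUTLASS header): invoking the m16n8k16
// F32 = F16 * F16 + F32 specialization above on per-thread register fragments. In real kernels
// the fragments are populated by ldmatrix / warp-level iterators; here they are zero-initialized
// only to keep the example self-contained. The function name is an assumption.
//
CUTLASS_DEVICE
void mma_16816_f16_f32_example() {
  using MmaOp = Mma<
      gemm::GemmShape<16, 8, 16>,
      32,
      half_t, layout::RowMajor,
      half_t, layout::ColumnMajor,
      float, layout::RowMajor,
      OpMultiplyAdd>;

  typename MmaOp::FragmentA frag_A;  // 8 x half_t per thread
  typename MmaOp::FragmentB frag_B;  // 4 x half_t per thread
  typename MmaOp::FragmentC accum;   // 4 x float accumulators per thread

  frag_A.clear();
  frag_B.clear();
  accum.clear();

  // accum = frag_A * frag_B + accum, executed cooperatively by the 32 threads of a warp.
  MmaOp mma;
  mma(accum, frag_A, frag_B, accum);
}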
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 884 - F64
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F64 = F64 * F64 + F64
template <>
struct Mma<
gemm::GemmShape<8,8,4>,
32,
double,
layout::RowMajor,
double,
layout::ColumnMajor,
double,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,4>;
using ElementA = double;
using LayoutA = layout::RowMajor;
using FragmentA = Array<double, 1>;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<double, 1>;
using ElementC = double;
using LayoutC = layout::RowMajor;
using FragmentC = Array<double, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
double const & A = reinterpret_cast<double const &>(a);
double const & B = reinterpret_cast<double const &>(b);
double const *C = reinterpret_cast<double const *>(&c);
double *D = reinterpret_cast<double *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.col.f64.f64.f64.f64 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=d"(D[0]), "=d"(D[1])
: "d"(A), "d"(B), "d"(C[0]), "d"(C[1]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16816 - S8 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 8>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 {%0,%1,%2,%3}, {%4,%5}, {%6}, "
"{%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
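//
// Illustrative sketch (not part of the original CUTLASS header): the integer specializations in
// this group follow the same calling pattern as the floating-point ones; only the fragment
// element types change. Shown for S32 = S8 * S8 + S32; the u8 and .satfinite variants below are
// invoked identically. The function name is an assumption.
//
CUTLASS_DEVICE
void mma_16816_s8_s32_example() {
  using MmaOp = Mma<
      gemm::GemmShape<16, 8, 16>,
      32,
      int8_t, layout::RowMajor,
      int8_t, layout::ColumnMajor,
      int, layout::RowMajor,
      OpMultiplyAdd>;

  typename MmaOp::FragmentA frag_A;  // 8 x int8_t per thread (two 32-bit registers)
  typename MmaOp::FragmentB frag_B;  // 4 x int8_t per thread (one 32-bit register)
  typename MmaOp::FragmentC accum;   // 4 x int accumulators per thread

  frag_A.clear();
  frag_B.clear();
  accum.clear();

  MmaOp mma;
  mma(accum, frag_A, frag_B, accum);
}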
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 8>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32 {%0,%1,%2,%3}, {%4,%5}, {%6}, "
"{%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 8>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32 {%0,%1,%2,%3}, {%4,%5}, {%6}, "
"{%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 8>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32 {%0,%1,%2,%3}, {%4,%5}, {%6}, "
"{%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16816 - S8 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 8>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5}, "
"{%6}, {%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 8>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5}, "
"{%6}, {%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 8>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5}, "
"{%6}, {%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,16>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 8>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const &B = reinterpret_cast<uint32_t const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5}, "
"{%6}, {%7,%8,%9,%10};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B), "r"(C[0]), "r"(C[1]), "r"(C[2]),
"r"(C[3]));
#else
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16832 - S8 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16832 - S8 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const * A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const * B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,32>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16,8,32>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16864 - S4 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
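//
// Illustrative sketch (not part of the original CUTLASS header): the sub-byte specializations are
// invoked with the same pattern; each thread's A fragment packs 32 int4b_t values into four
// 32-bit registers and its B fragment packs 16 into two. The function name is an assumption.
//
CUTLASS_DEVICE
void mma_16864_s4_s32_example() {
  using MmaOp = Mma<
      gemm::GemmShape<16, 8, 64>,
      32,
      cutlass::int4b_t, layout::RowMajor,
      cutlass::int4b_t, layout::ColumnMajor,
      int, layout::RowMajor,
      OpMultiplyAdd>;

  typename MmaOp::FragmentA frag_A;  // Array<int4b_t, 32>
  typename MmaOp::FragmentB frag_B;  // Array<int4b_t, 16>
  typename MmaOp::FragmentC accum;   // Array<int, 4>

  frag_A.clear();
  frag_B.clear();
  accum.clear();

  MmaOp mma;
  mma(accum, frag_A, frag_B, accum);
}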
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16864 - S4 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const * A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const * B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
  using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<16, 8, 64>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<16, 8, 64>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32.satfinite {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
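/*
  Illustrative usage sketch of the m16n8k64 integer Mma atoms above, invoked from warp-level code
  once the per-thread fragments have been populated; fragment loading is elided and all names
  below are hypothetical.

    using MmaOp = cutlass::arch::Mma<
        cutlass::gemm::GemmShape<16, 8, 64>, 32,
        cutlass::int4b_t, cutlass::layout::RowMajor,
        cutlass::int4b_t, cutlass::layout::ColumnMajor,
        int, cutlass::layout::RowMajor,
        cutlass::arch::OpMultiplyAddSaturate>;

    __device__ void example(MmaOp::FragmentA const &frag_a,
                            MmaOp::FragmentB const &frag_b,
                            MmaOp::FragmentC &accum) {
      MmaOp mma;
      mma(accum, frag_a, frag_b, accum);   // D = A * B + C with saturating accumulation
    }
*/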
/// Matrix multiply-add operation: S32 = B1 & B1 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,256>,
32,
cutlass::uint1b_t,
layout::RowMajor,
cutlass::uint1b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,256>;
using ElementA = cutlass::uint1b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint1b_t, 128>;
using ElementB = cutlass::uint1b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint1b_t, 64>;
using ElementC = int32_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int32_t, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.and.popc {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 16x8x256 - B1 input, S32 accumulation - XOR, POPC
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = B1 ^ B1 + S32
template <>
struct Mma<
gemm::GemmShape<16,8,256>,
32,
cutlass::uint1b_t,
layout::RowMajor,
cutlass::uint1b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpXorPopc> {
using Shape = gemm::GemmShape<16,8,256>;
using ElementA = cutlass::uint1b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint1b_t, 128>;
using ElementB = cutlass::uint1b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint1b_t, 64>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using Operator = OpXorPopc;
using ArchTag = arch::Sm80;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.xor.popc {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, "
"{%8,%9}, {%10,%11,%12,%13};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif // defined(CUTLASS_ARCH_MMA_SM80_ENABLED)
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 55,581 | C | 24.426349 | 110 | 0.565985 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/simd_sm60.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing SIMD operators for SM60
*/
#pragma once
#include "simd.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Element-wise operators - specialized for half_t x 2
//
template <>
CUTLASS_HOST_DEVICE
Array<half_t, 2> operator*(Array<half_t, 2> const &a, Array<half_t, 2> const &b) {
Array<half_t, 2> d;
// TODO
return d;
}
template <>
CUTLASS_HOST_DEVICE
Array<half_t, 2> operator+(Array<half_t, 2> const &a, Array<half_t, 2> const &b) {
Array<half_t, 2> d;
// TODO
return d;
}
template <>
CUTLASS_HOST_DEVICE
Array<half_t, 2> operator-(Array<half_t, 2> const &a, Array<half_t, 2> const &b) {
  Array<half_t, 2> d;
// TODO
return d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Multiply-accumulate operators - specialized for half_t x 2
template <>
CUTLASS_HOST_DEVICE
Array<half_t, 2> mac(Array<half_t, 2> const &a, Array<half_t, 2> const &b, Array<half_t, 2> const &c) {
Array<half_t, 2> d;
// TODO
return d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Dot product operator - specialized for half_t <- (half_t * half_t) x 2 + half_t
template <>
CUTLASS_HOST_DEVICE
half_t dot(Array<half_t, 2> const &a, Array<half_t, 2> const &b, half_t accum) {
// TODO
return accum;
}
/// Dot product operator - specialized for float <- (half_t * half_t) x 2 + float
template <>
CUTLASS_HOST_DEVICE
float dot(Array<half_t, 2> const &a, Array<half_t, 2> const &b, float accum) {
// TODO
return accum;
}
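/*
  Illustrative sketch of the intended semantics; the specializations above are still TODO stubs,
  and the variable names below are hypothetical.

    __device__ void example(Array<half_t, 2> const &a, Array<half_t, 2> const &b) {
      Array<half_t, 2> c;
      c[0] = half_t(1.0f);
      c[1] = half_t(2.0f);

      Array<half_t, 2> d = mac(a, b, c);   // expected: d[i] = a[i] * b[i] + c[i]
      float sum = dot(a, b, 0.0f);         // expected: sum = a[0] * b[0] + a[1] * b[1] + accum
      (void)d;
      (void)sum;
    }
*/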
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 3,656 | C | 30.25641 | 103 | 0.597101 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/memory.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory
*/
#pragma once
#include "cutlass/cutlass.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Fragment type to store loaded data
typename AccessType,
/// The bytes of loading
int LoadBytes
>
struct global_load;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
#if (((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) || \
(__CUDACC_VER_MAJOR__ > 11)) && \
defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) && \
! (defined(__clang__) && defined(__CUDA__))
#define CUTLASS_ENABLE_L2_PREFETCH 1
#else
#define CUTLASS_ENABLE_L2_PREFETCH 0
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
// The redundant mov PTX instruction is used to force the compiler to keep
// the initializing code before ld.global
template <typename AccessType>
struct global_load<AccessType,
32
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 *data = reinterpret_cast<uint4 *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %9, 0;\n"
" mov.b32 %0, %10;\n"
" mov.b32 %1, %11;\n"
" mov.b32 %2, %12;\n"
" mov.b32 %3, %13;\n"
" mov.b32 %4, %14;\n"
" mov.b32 %5, %15;\n"
" mov.b32 %6, %16;\n"
" mov.b32 %7, %17;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.L2::128B.v4.u32 {%4, %5, %6, %7}, [%18];\n"
#else
" @p ld.global.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.v4.u32 {%4, %5, %6, %7}, [%18];\n"
#endif
"}\n"
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w),
"=r"(data[1].x), "=r"(data[1].y), "=r"(data[1].z), "=r"(data[1].w)
: "l"(ptr), "r"((int)pred_guard), "r"(data[0].x), "r"(data[0].y),
"r"(data[0].z), "r"(data[0].w), "r"(data[1].x), "r"(data[1].y),
"r"(data[1].z), "r"(data[1].w), "l"(((uint8_t *)ptr) + 16));
}
};
template <typename AccessType>
struct global_load<AccessType,
16
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 &data = reinterpret_cast<uint4 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" mov.b32 %0, %6;\n"
" mov.b32 %1, %7;\n"
" mov.b32 %2, %8;\n"
" mov.b32 %3, %9;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v4.u32 {%0, %1, %2, %3}, [%4];\n"
#else
" @p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
#endif
"}\n"
: "=r"(data.x), "=r"(data.y), "=r"(data.z), "=r"(data.w)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w));
}
};
template <typename AccessType>
struct global_load<AccessType,
8
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint2 &data = reinterpret_cast<uint2 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" mov.b32 %0, %4;\n"
" mov.b32 %1, %5;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v2.u32 {%0, %1}, [%2];\n"
#else
" @p ld.global.v2.u32 {%0, %1}, [%2];\n"
#endif
"}\n"
: "=r"(data.x), "=r"(data.y)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y));
}
};
template <typename AccessType>
struct global_load<AccessType,
4
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
unsigned &data = reinterpret_cast<unsigned &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b32 %0, %3;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.u32 %0, [%1];\n"
#else
" @p ld.global.u32 %0, [%1];\n"
#endif
"}\n"
: "=r"(data)
: "l"(ptr), "r"((int)pred_guard), "r"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
2
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint16_t &data = reinterpret_cast<uint16_t &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b16 %0, %3;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.u16 %0, [%1];\n"
#else
" @p ld.global.u16 %0, [%1];\n"
#endif
"}\n"
: "=h"(data)
: "l"(ptr), "r"((int)pred_guard), "h"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
1
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
if (pred_guard) D = *(reinterpret_cast<AccessType const *>(ptr));
}
};
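/*
  Illustrative usage sketch of the predicated global_load functor above; the fragment type and
  names are hypothetical.

    using Fragment = Array<float, 4>;   // 16 bytes -> selects the v4.u32 specialization

    CUTLASS_DEVICE void example(float const *gmem_ptr, bool guard, Fragment &frag) {
      frag.clear();
      global_load<Fragment, sizeof(Fragment)>(frag, gmem_ptr, guard);
      // frag holds the four floats at gmem_ptr when guard is true; otherwise it keeps its
      // cleared contents.
    }
*/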
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Fragment type to store data
typename AccessType,
/// The bytes of storing
int StoreBytes
>
struct global_store;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct global_store<AccessType, 64> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const *data = reinterpret_cast<uint4 const *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
" @p st.global.v4.u32 [%6], {%7, %8, %9, %10};\n"
" @p st.global.v4.u32 [%11], {%12, %13, %14, %15};\n"
" @p st.global.v4.u32 [%16], {%17, %18, %19, %20};\n"
"}\n"
:
: "l"(ptr), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z),
"r"(data[0].w), "r"((int)pred_guard), "l"(((uint8_t *)ptr) + 16),
"r"(data[1].x), "r"(data[1].y), "r"(data[1].z), "r"(data[1].w),
"l"(((uint8_t *)ptr) + 32),
"r"(data[2].x), "r"(data[2].y), "r"(data[2].z), "r"(data[2].w),
"l"(((uint8_t *)ptr) + 48),
"r"(data[3].x), "r"(data[3].y), "r"(data[3].z), "r"(data[3].w));
}
};
template <typename AccessType>
struct global_store<AccessType, 32> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const *data = reinterpret_cast<uint4 const *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
" @p st.global.v4.u32 [%6], {%7, %8, %9, %10};\n"
"}\n"
:
: "l"(ptr), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z),
"r"(data[0].w), "r"((int)pred_guard), "l"(((uint8_t *)ptr) + 16),
"r"(data[1].x), "r"(data[1].y), "r"(data[1].z), "r"(data[1].w));
}
};
template <typename AccessType>
struct global_store<AccessType, 16> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const &data = reinterpret_cast<uint4 const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
"}\n"
:
: "l"(ptr), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 8> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint2 const &data = reinterpret_cast<uint2 const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" @p st.global.v2.u32 [%0], {%1, %2};\n"
"}\n"
:
: "l"(ptr), "r"(data.x), "r"(data.y), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 4> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint32_t const &data = reinterpret_cast<uint32_t const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.u32 [%0], %1;\n"
"}\n"
:
: "l"(ptr), "r"(data), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 2> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint16_t const &data = reinterpret_cast<uint16_t const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.u16 [%0], %1;\n"
"}\n"
:
: "l"(ptr), "h"(data), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 1> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
if (pred_guard) *(reinterpret_cast<AccessType *>(ptr)) = D;
}
};
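/*
  Illustrative usage sketch of the predicated global_store functor above, mirroring the load
  example; names are hypothetical.

    using Fragment = Array<float, 4>;   // 16 bytes -> selects the v4.u32 specialization

    CUTLASS_DEVICE void example(Fragment const &frag, float *gmem_ptr, bool guard) {
      global_store<Fragment, sizeof(Fragment)>(frag, gmem_ptr, guard);
    }
*/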
/////////////////////////////////////////////////////////////////////////////////////////////////
/// ld.shared
template <int Bytes>
CUTLASS_DEVICE
void shared_load(void *dst, uint32_t ptr);
/// ld.shared - 16b
template <>
CUTLASS_DEVICE
void shared_load<2>(void *dst, uint32_t ptr) {
asm volatile("ld.shared.u16 %0, [%1];\n"
: "=h"(*reinterpret_cast<uint16_t *>(dst))
: "r"(ptr));
}
/// ld.shared - 32b
template <>
CUTLASS_DEVICE
void shared_load<4>(void *dst, uint32_t ptr) {
asm volatile("ld.shared.u32 %0, [%1];\n"
: "=r"(*reinterpret_cast<uint32_t *>(dst))
: "r"(ptr));
}
/// ld.shared - 64b
template <>
CUTLASS_DEVICE
void shared_load<8>(void *dst, uint32_t ptr) {
uint2 *dst_u64 = reinterpret_cast<uint2 *>(dst);
asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
:
"=r"(dst_u64->x),
"=r"(dst_u64->y)
: "r"(ptr));
}
/// ld.shared - 128b
template <>
CUTLASS_DEVICE
void shared_load<16>(void *dst, uint32_t ptr) {
uint4 *dst_u128 = reinterpret_cast<uint4 *>(dst);
asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
:
"=r"(dst_u128->x),
"=r"(dst_u128->y),
"=r"(dst_u128->z),
"=r"(dst_u128->w)
: "r"(ptr));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// st.shared
template <int Bytes>
CUTLASS_DEVICE
void shared_store(uint32_t ptr, void const *src);
/// st.shared - 16b
template <>
CUTLASS_DEVICE
void shared_store<2>(uint32_t ptr, void const *src) {
asm volatile("st.shared.u16 [%0], %1;\n"
: :
"r"(ptr),
"h"(*reinterpret_cast<uint16_t const *>(src))
);
}
/// st.shared - 32b
template <>
CUTLASS_DEVICE
void shared_store<4>(uint32_t ptr, void const *src) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(ptr),
"r"(*reinterpret_cast<uint32_t const *>(src))
);
}
/// st.shared - 64b
template <>
CUTLASS_DEVICE
void shared_store<8>(uint32_t ptr, void const *src) {
uint2 const *dst_u64 = reinterpret_cast<uint2 const *>(src);
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(ptr),
"r"(dst_u64->x),
"r"(dst_u64->y)
);
}
/// st.shared - 128b
template <>
CUTLASS_DEVICE
void shared_store<16>(uint32_t ptr, void const *src) {
uint4 const *dst_u128 = reinterpret_cast<uint4 const *>(src);
asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
: :
"r"(ptr),
"r"(dst_u128->x),
"r"(dst_u128->y),
"r"(dst_u128->z),
"r"(dst_u128->w)
);
}
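/*
  Illustrative usage sketch of shared_load/shared_store: both take a 32-bit shared-memory address,
  which can be obtained with cutlass_get_smem_pointer() from memory_sm75.h (included below). Names
  are hypothetical.

    CUTLASS_DEVICE void example(void *smem_ptr) {
      unsigned smem_addr = cutlass_get_smem_pointer(smem_ptr);

      uint4 value;
      shared_load<16>(&value, smem_addr);    // 128-bit ld.shared
      shared_store<16>(smem_addr, &value);   // 128-bit st.shared
    }
*/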
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "memory_sm75.h"
#include "memory_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| 14,313 | C | 29.134737 | 100 | 0.490603 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/wmma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for warp matrix multiply-add (WMMA) operations
*/
#pragma once
// CUTLASS WMMA does not support clang at present.
#if !(defined(__clang__) && defined(__CUDA__))
#if (__CUDACC_VER_MAJOR__ >= 9)
#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700))
#define CUTLASS_ARCH_WMMA_ENABLED
#define CUTLASS_ARCH_WMMA_SM70_ENABLED
#endif
#endif
#if (__CUDACC_VER_MAJOR__ >= 10)
#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 720))
#define CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED
#define CUTLASS_ARCH_WMMA_SM72_ENABLED
#endif
#endif
#if (__CUDACC_VER_MAJOR__ >= 10)
#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 750))
#define CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED
#define CUTLASS_ARCH_WMMA_SM75_ENABLED
#endif
#endif
#endif //!(defined(__clang__) && defined(__CUDA__))
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include <mma.h>
#include "cutlass/arch/mma.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps cutlass data types => nvcuda::wmma data types
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Type_>
struct CutlassToWmmaDataType{
using Type = Type_;
};
/// Statically maps cutlass::half_t => __half
template<>
struct CutlassToWmmaDataType<cutlass::half_t> {
using Type = __half;
};
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDACC_VER_MAJOR__ >= 11)
template<>
struct CutlassToWmmaDataType<cutlass::bfloat16_t> {
using Type = __nv_bfloat16;
};
#endif
/// Statically maps int8_t => char
template<>
struct CutlassToWmmaDataType<int8_t> {
using Type = signed char;
};
/// Statically maps uint8_t => char
template<>
struct CutlassToWmmaDataType<uint8_t> {
using Type = unsigned char;
};
/// Statically maps int32_t => int
template<>
struct CutlassToWmmaDataType<int32_t> {
using Type = int;
};
#if defined(CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED)
/// Statically maps cutlass::int4b_t => experimental::precision::s4
template<>
struct CutlassToWmmaDataType<cutlass::int4b_t> {
using Type = nvcuda::wmma::experimental::precision::s4;
};
/// Statically maps cutlass::uint4b_t => experimental::precision::u4
template<>
struct CutlassToWmmaDataType<cutlass::uint4b_t> {
using Type = nvcuda::wmma::experimental::precision::u4;
};
/// Statically maps cutlass::uint1b_t => experimental::precision::b1
template<>
struct CutlassToWmmaDataType<cutlass::uint1b_t> {
using Type = nvcuda::wmma::experimental::precision::b1;
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps cutlass::layout => nvcuda::wmma layout tags
////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout_>
struct CutlassToWmmaLayout {
};
/// Statically maps cutlass::layout::RowMajor => nvcuda::wmma::row_major layout tags
template <>
struct CutlassToWmmaLayout<cutlass::layout::RowMajor> {
using Layout = nvcuda::wmma::row_major;
static nvcuda::wmma::layout_t const value = nvcuda::wmma::layout_t::mem_row_major;
};
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps cutlass::layout::ColumnMajor => nvcuda::wmma::col_major layout tags
////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct CutlassToWmmaLayout<cutlass::layout::ColumnMajor> {
using Layout = nvcuda::wmma::col_major;
static nvcuda::wmma::layout_t const value = nvcuda::wmma::layout_t::mem_col_major;
};
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps nvcuda::wmma data types => cutlass data types
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Type_>
struct WmmaToCutlassDataType{
using Type = Type_;
};
/// Statically maps __half => cutlass::half_t
template<>
struct WmmaToCutlassDataType<__half> {
using Type = cutlass::half_t;
};
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDACC_VER_MAJOR__ >= 11)
template<>
struct WmmaToCutlassDataType<__nv_bfloat16> {
using Type = cutlass::bfloat16_t;
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// WMMA template structure defines nvcuda::wmma::fragments and static assertion checks
// for a specific template parameterized data type (Element[A|B|C]), layout (Layout[A|B|C]),
// and native wmma size (Shape)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< Size of the matrix product (concept: GemmShape)
typename ElementA_, ///< Data type of A elements
typename LayoutA_, ///< Layout of A matrix (concept: MatrixLayout)
typename ElementB_, ///< Data type of B elements
typename LayoutB_, ///< Layout of B matrix (concept: MatrixLayout)
typename ElementC_, ///< Element type of C matrix
typename LayoutC_, /// Layout of C matrix (concept: MatrixLayout)
typename Operator_ = cutlass::arch::OpMultiplyAdd ///< Inner product operator (multiply-add, xor.popc)
>
struct Wmma;
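/*
  Illustrative usage sketch: a concrete Wmma specialization (provided by the per-architecture
  headers included below) is selected purely from cutlass types, and its nested fragment types are
  the corresponding nvcuda::wmma fragments. The 16x16x16 f16 shape and the names below are
  assumptions made only for illustration.

    using WmmaOp = cutlass::arch::Wmma<
        cutlass::gemm::GemmShape<16, 16, 16>,
        cutlass::half_t, cutlass::layout::RowMajor,
        cutlass::half_t, cutlass::layout::ColumnMajor,
        float, cutlass::layout::RowMajor,
        cutlass::arch::OpMultiplyAdd>;

    __device__ void example(WmmaOp::FragmentA const &a,
                            WmmaOp::FragmentB const &b,
                            WmmaOp::FragmentC &c) {
      WmmaOp wmma_op;
      wmma_op(c, a, b, c);   // delegates to nvcuda::wmma::mma_sync
    }
*/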
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations for each compute capability
//
#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED
#include "cutlass/arch/wmma_sm70.h"
#endif
#ifdef CUTLASS_ARCH_WMMA_SM72_ENABLED
#include "cutlass/arch/wmma_sm72.h"
#endif
#ifdef CUTLASS_ARCH_WMMA_SM75_ENABLED
#include "cutlass/arch/wmma_sm75.h"
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
| 8,473 | C | 36.830357 | 106 | 0.555647 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm61.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutA, typename LayoutB, typename LayoutC>
struct Mma<
gemm::GemmShape<1,1,4>,
1,
int8_t,
LayoutA,
int8_t,
LayoutB,
int,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 4>;
using Operator = OpMultiplyAdd;
using ElementC = int;
CUTLASS_HOST_DEVICE
void operator()(
Array<int, 1> &d,
Array<int8_t, 4> const &a,
Array<int8_t, 4> const &b,
Array<int, 1> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 610))
unsigned const &A = reinterpret_cast<unsigned const &>(a);
unsigned const &B = reinterpret_cast<unsigned const &>(b);
asm volatile("dp4a.s32.s32 %0, %1, %2, %3;"
: "=r"(d[0])
: "r"(A), "r"(B), "r"(c[0]));
#else
d[0] = c[0];
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < 4; ++k) {
d[0] += a[k] * b[k];
}
#endif
}
};
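/*
  Illustrative usage sketch of the 1x1x4 atom above: four int8 products are reduced into a single
  int32 accumulator (dp4a on SM61 and newer, a scalar loop otherwise). Names are hypothetical.

    using Dp4a = Mma<
        gemm::GemmShape<1, 1, 4>, 1,
        int8_t, layout::RowMajor,
        int8_t, layout::ColumnMajor,
        int, layout::RowMajor,
        OpMultiplyAdd>;

    CUTLASS_DEVICE void example(Array<int8_t, 4> const &a, Array<int8_t, 4> const &b,
                                Array<int, 1> &d) {
      Dp4a mma;
      mma(d, a, b, d);   // d[0] += a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3]
    }
*/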
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutC>
struct Mma<
gemm::GemmShape<1, 1, 2>,
1,
int16_t,
layout::RowMajor,
int16_t,
layout::ColumnMajor,
int,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 2>;
using Operator = OpMultiplyAdd;
using ElementC = int;
CUTLASS_HOST_DEVICE
void operator()(
Array<int, 1> &d,
Array<int16_t, 2> const &a,
Array<int16_t, 2> const &b,
Array<int, 1> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 610))
unsigned const &A = reinterpret_cast<unsigned const &>(a);
unsigned const &B = reinterpret_cast<unsigned const &>(b);
asm volatile("dp2a.s32.s32 %0, %1, %2, %3;"
: "=r"(d[0])
: "r"(A), "r"(B), "r"(c[0]));
#else
d[0] = c[0];
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < 2; ++k) {
d[0] += a[k] * b[k];
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
| 4,193 | C | 28.328671 | 100 | 0.56165 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/cache_operation.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Directives related to cache operations
*/
#pragma once
#include "cutlass/cutlass.h"
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Controls PTX cache operations
struct CacheOperation {
enum Kind {
/// Cache at all levels - accessed again
Always,
/// Cache at global level
Global,
/// Streaming - likely to be accessed once
Streaming,
/// Indicates the line will not be used again
LastUse,
/// Don't cache, and fetch again
Volatile,
/// Write back at all coherent levels
WriteBack,
/// Write through to system memory
WriteThrough
};
};
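/*
  Illustrative usage sketch: CacheOperation::Kind is consumed as a template argument by memory
  primitives elsewhere in CUTLASS. The cp_async call below follows the asynchronous-copy interface
  declared in memory_sm80.h and is shown only as an assumed example; names are hypothetical.

    CUTLASS_DEVICE void example(void *smem_ptr, void const *gmem_ptr, bool guard) {
      cp_async<16, CacheOperation::Global>(smem_ptr, gmem_ptr, guard);
    }
*/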
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 2,691 | C | 39.179104 | 100 | 0.629877 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/wmma_sm75.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for cutlass::int4b_t (experimental::s4).
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::int4b_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::int4b_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM75_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::int4b_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::int4b_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm75;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<8, 8, 32>, Shape>::value,
"Supported list of wmma operator shape for s8 multiplicands is: 8x8x32");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
    static_assert(false, "wmma.mma.sync for integer multiplicands is available only on SM75 and beyond");
#endif
};
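/*
  Illustrative usage sketch of the s4 specialization above; its fragments are regular nvcuda::wmma
  fragments, so they are loaded and stored with the usual WMMA API (elided here). Names are
  hypothetical.

    using WmmaS4 = cutlass::arch::Wmma<
        cutlass::gemm::GemmShape<8, 8, 32>,
        cutlass::int4b_t, cutlass::layout::RowMajor,
        cutlass::int4b_t, cutlass::layout::ColumnMajor,
        int32_t, cutlass::layout::RowMajor,
        cutlass::arch::OpMultiplyAdd>;

    __device__ void example(WmmaS4::FragmentA const &a,
                            WmmaS4::FragmentB const &b,
                            WmmaS4::FragmentC &c) {
      WmmaS4 op;
      op(c, a, b, c);   // nvcuda::wmma::mma_sync on the s4 fragments
    }
*/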
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for cutlass::uint1b_t (experimental::b1).
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::uint1b_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::uint1b_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpXorPopc ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM75_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::uint1b_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::uint1b_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpXorPopc;
using ArchTag = arch::Sm75;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<8, 8, 128>, Shape>::value,
"Supported list of wmma operator shape for b1 multiplicands is: 8x8x128");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::bmma_sync(D, A, B, C, nvcuda::wmma::experimental::bmmaBitOpXOR,
nvcuda::wmma::experimental::bmmaAccumulateOpPOPC);
}
#else
    static_assert(false, "wmma.mma.sync for integer multiplicands is available only on SM75 and beyond");
#endif
};
} // namespace arch
} // namespace cutlass
| 7,616 | C | 35.620192 | 108 | 0.589417 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/memory_sm75.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory added for SM75
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Layout of destination matrix (column-major implies transpose)
typename Layout,
/// .x1, .x2, or .x4
int MatrixCount
>
inline __device__ void ldsm(Array<unsigned, MatrixCount> & D, void const* ptr);
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Determine the appropriate way to target PTX's "ldmatrix" instruction.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2) || (__CUDACC_VER_MAJOR__ >= 11)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750)
#define CUDA_LDMATRIX_ACTIVATED 1
#endif
#define CUDA_LDMATRIX_SUPPORTED 1
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/*
#if ! defined(CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED) && (__CUDACC_VER_MAJOR__ > 10)
#define CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED 1
#endif
#if ! defined(CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED)
#define CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED ((__CUDACC_VER_MAJOR__ == 10) && (__CUDACC_VER_MINOR__ >= 1))
#endif
#if ! defined(CUDA_NVVM_GET_SMEM_POINTER_ENABLED)
#define CUDA_NVVM_GET_SMEM_POINTER_ENABLED CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED
#endif
*/
#if (! defined (__clang__) && __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)
extern "C" {
//
// This NVVM intrinsic is subject to change in future versions of CUDA.
// Clients should not call it directly. Rather, they should use the
// cutlass::arch::ldsm<>() template.
//
__device__ uint32_t __nvvm_get_smem_pointer(void *);
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void *ptr) {
  // We prefer the new CVTA intrinsic when it is available; otherwise we fall back to the
  // older internal intrinsic where supported.
#if (! defined (__clang__) && defined(__CUDA_ARCH__) && __CUDACC_VER_MAJOR__ >= 11)
//
// This NVVM intrinsic converts an address in shared memory to a plain
// unsigned integer. This is necessary to pass to shared memory instructions
// in inline PTX.
//
// In CUDA 11 and beyond, this replaces __nvvm_get_smem_pointer() [only available in 10.2].
//
//__device__ size_t __cvta_generic_to_shared(void* ptr);
/// CUTLASS helper to get SMEM pointer
return static_cast<unsigned>(__cvta_generic_to_shared(ptr));
#elif (! defined (__clang__) && defined(__CUDA_ARCH__) && __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)
return __nvvm_get_smem_pointer(ptr);
#elif defined(__CUDA_ARCH__)
uint32_t smem_ptr;
asm(
"{ .reg .u64 smem_ptr; cvta.to.shared.u64 smem_ptr, %1; cvt.u32.u64 %0, smem_ptr; }\n"
: "=r"(smem_ptr) : "l"(ptr));
return smem_ptr;
#else
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
return 0;
#endif
}
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void const *ptr) {
return cutlass_get_smem_pointer(const_cast<void *>(ptr));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 1>(
Array<unsigned, 1> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x;
asm volatile ("ldmatrix.sync.aligned.x1.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr));
reinterpret_cast<int &>(D) = x;
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 2>(
Array<unsigned, 2> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y;
asm volatile ("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr));
reinterpret_cast<int2 &>(D) = make_int2(x, y);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 4>(
Array<unsigned, 4> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y, z, w;
asm volatile ("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr));
reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Transpose on 16b granularity
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 1>(
Array<unsigned, 1> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x;
asm volatile ("ldmatrix.sync.aligned.x1.trans.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr));
reinterpret_cast<int &>(D) = x;
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 2>(
Array<unsigned, 2> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y;
asm volatile ("ldmatrix.sync.aligned.x2.trans.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr));
reinterpret_cast<int2 &>(D) = make_int2(x, y);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 4>(
Array<unsigned, 4> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y, z, w;
asm volatile ("ldmatrix.sync.aligned.x4.trans.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr));
reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
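/*
  Illustrative usage sketch of ldsm<>: the pointer must reference shared memory, the layout tag
  selects the transposed (.trans) form, and MatrixCount picks the .x1/.x2/.x4 variant. Names are
  hypothetical.

    CUTLASS_DEVICE void example(void const *smem_ptr) {
      Array<unsigned, 4> frag;
      ldsm<layout::RowMajor, 4>(frag, smem_ptr);   // ldmatrix.sync.aligned.x4.m8n8
    }
*/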
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType, int Bytes>
struct shared_load_op {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
D = *reinterpret_cast<AccessType const *>(ptr);
}
};
template <typename AccessType>
CUTLASS_DEVICE void shared_load(AccessType &D, void const *ptr) {
shared_load_op<AccessType, int(sizeof(AccessType))>(D, ptr);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct shared_load_op<AccessType, 16> {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
unsigned addr = cutlass_get_smem_pointer(ptr);
uint4 v;
asm volatile ("ld.shared.v4.b32 {%0, %1, %2, %3}, [%4];" :
"=r"(v.x), "=r"(v.y), "=r"(v.z), "=r"(v.w) : "r"(addr));
D = reinterpret_cast<AccessType const &>(v);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct shared_load_op<AccessType, 8> {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
unsigned addr = cutlass_get_smem_pointer(ptr);
uint2 v;
asm volatile ("ld.shared.v2.b32 {%0, %1}, [%2];" :
"=r"(v.x), "=r"(v.y) : "r"(addr));
D = reinterpret_cast<AccessType const &>(v);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 10,490 | C | 29.855882 | 141 | 0.545472 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/simd_sm61.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing SIMD operators for SM61
*/
#pragma once
#include "simd.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Dot product operator - specialized for int32_t <- (int8_t * int8_t) x 4 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<int8_t, 4> const &a, Array<int8_t, 4> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint8_t * int8_t) x 4 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<uint8_t, 4> const &a, Array<int8_t, 4> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (int8_t * uint8_t) x 4 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<int8_t, 4> const &a, Array<uint8_t, 4> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint8_t * uint8_t) x 4 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<uint8_t, 4> const &a, Array<uint8_t, 4> const &b, int32_t accum) {
return accum;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Dot product operator - specialized for int32_t <- (int16_t * int8_t) x 2 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<int16_t, 2> const &a, Array<int8_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint16_t * int8_t) x 2 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<uint16_t, 2> const &a, Array<int8_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (int16_t * uint8_t) x 2 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<int16_t, 2> const &a, Array<uint8_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint16_t * uint8_t) x 2 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<uint16_t, 2> const &a, Array<uint8_t, 2> const &b, int32_t accum) {
return accum;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Dot product operator - specialized for int32_t <- (int16_t * int16_t) x 2 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<int16_t, 2> const &a, Array<int16_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint16_t * int16_t) x 2 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<uint16_t, 2> const &a, Array<int16_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (int16_t * uint16_t) x 2 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<int16_t, 2> const &a, Array<uint16_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint16_t * uint16_t) x 2 + int32_t
template <>
CUTLASS_HOST_DEVICE
int32_t dot(Array<uint16_t, 2> const &a, Array<uint16_t, 2> const &b, int32_t accum) {
return accum;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 5,102 | C | 33.47973 | 100 | 0.624265 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/pitch_linear_thread_map.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing how threads are mapped to a given tile.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
////////////////////////////////////////////////////////////////////////////////
/// Strip-mines a pitch-linear tile among a given number of threads, first along
/// the contiguous dimension then along the strided dimension.
///
/// The tile must be divisible by the thread count such that all threads may
/// execute the same number of iterations with the same delta to exhaustively
/// cover the tile.
///
/// This class satisfies the "RegularThreadMapping" concept.
///
/// This ThreadMap is used by SIMT kernels and operand E of the sparse tensor
/// kernels.
template <
typename Shape_,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearStripminedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal implementation details
struct Detail {
static_assert(!(Shape::kContiguous % kElementsPerAccess), "");
/// Shape of the tile in units of vectors
using ShapeVec = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
static_assert((Threads < ShapeVec::kContiguous && !(ShapeVec::kContiguous % kThreads)) ||
(!(kThreads % ShapeVec::kContiguous)),
"Shape must be divisible by number of iterations of each thread.");
};
/// Number of iterations by each thread
using Iterations = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
// Redo the comparison here to work around divide by zero compiler
          // error. The compiler evaluates both paths of platform::conditional.
(Threads >= Detail::ShapeVec::kContiguous
? (Detail::ShapeVec::kStrided + (kThreads / Detail::ShapeVec::kContiguous - 1)) /
(kThreads / Detail::ShapeVec::kContiguous)
: 0)>,
layout::PitchLinearShape<Detail::ShapeVec::kContiguous / kThreads,
Detail::ShapeVec::kStrided>>::type;
/// Interval between accesses along each dimension of the tensor's logical coordinate space
/// (in units of Elements)
using Delta = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
kThreads / Detail::ShapeVec::kContiguous
>,
layout::PitchLinearShape<
kThreads * kElementsPerAccess,
1
>
>::type;
/// Shape of the tile in units of vectors
using StorageShape = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<Shape::kContiguous,
Iterations::kStrided*(kThreads / Detail::ShapeVec::kContiguous)>,
layout::PitchLinearShape<Shape::kContiguous, Shape::kStrided>>::type;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
/// (in units of Elements)
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
return TensorCoord(
(thread_id % Detail::ShapeVec::kContiguous) * kElementsPerAccess,
thread_id / Detail::ShapeVec::kContiguous);
}
};
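// Illustrative sketch of the strip-mined map above. The 64 x 8 tile, 32 threads,
// and 4-element vector width are assumed values chosen only for this example,
// not a configuration required by CUTLASS:
//
//   using ExampleMap = PitchLinearStripminedThreadMap<
//       layout::PitchLinearShape<64, 8>,   // 64 contiguous x 8 strided elements
//       32,                                // one warp of threads
//       4>;                                // 4 elements per access
//
//   // ShapeVec = <16, 8> vectors, so Iterations = <1, 4>, Delta = <1, 2>, and
//   // initial_offset(t) = ((t % 16) * 4, t / 16); e.g. thread 17 starts at
//   // element (4, 1) and then visits rows 3, 5, and 7 on later iterations.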
/// This ThreadMap is used by GEMV
template <
typename Shape,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearTilePolicyStripminedThreadContiguous
{
static_assert((Shape::kContiguous % (Threads * ElementsPerAccess)) == 0,
"Contiguous shape must divide number of threads");
using TensorCoord = layout::PitchLinearCoord;
static int const kThreads = Threads;
static int const kElementsPerAccess = ElementsPerAccess;
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / (kThreads * kElementsPerAccess),
Shape::kStrided>;
using Delta = layout::PitchLinearShape<1, 1>;
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id)
{
return TensorCoord(thread_id * Iterations::kContiguous * kElementsPerAccess, 0);
}
};
template <
typename Shape,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearTilePolicyStripminedThreadStrided
{
static_assert((Shape::kStrided % Threads == 0),
"Strided shape must divide number of threads");
using TensorCoord = layout::PitchLinearCoord;
static int const kThreads = Threads;
static int const kElementsPerAccess = ElementsPerAccess;
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided / kThreads>;
using Delta = layout::PitchLinearShape<1, 1>;
using ShapeVec = Shape;
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id)
{
return TensorCoord(0, thread_id * Iterations::kStrided);
}
};
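// Illustrative sketch of the two GEMV policies above; the 256 x 4 tile,
// 32 threads, and 4-element accesses are assumed values:
//
//   using ContiguousPolicy = PitchLinearTilePolicyStripminedThreadContiguous<
//       layout::PitchLinearShape<256, 4>, 32, 4>;
//
//   // Iterations = <256 / (32 * 4), 4> = <2, 4> and
//   // initial_offset(t) = (t * 2 * 4, 0); e.g. thread 3 starts at (24, 0).
//   // The ThreadStrided variant instead divides Shape::kStrided across the
//   // threads and offsets each thread along the strided dimension.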
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-raked arrangement in which a shape is partitioned into contiguous
/// elements.
///
/// This ThreadMap is used by tensor core kernels.
template <
typename Shape_,
int Threads,
typename WarpThreadArrangement_,
int ElementsPerAccess = 1
>
struct PitchLinearWarpRakedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(
!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Compute the 'shape' of the overall tile in units of vectors
using ShapeInAccesses = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
static_assert(
!(ShapeInAccesses::kContiguous % WarpThreadArrangement::kContiguous),
"ShapeInAccesses must be divisible by WarpThreadArrangement.");
static_assert(
!(ShapeInAccesses::kStrided % WarpThreadArrangement::kStrided),
"ShapeInAccesses must be divisible by WarpThreadArrangement.");
// compute number of warp-level accesses total
using WarpAccessIterations = layout::PitchLinearShape<
ShapeInAccesses::kContiguous / WarpThreadArrangement::kContiguous,
ShapeInAccesses::kStrided / WarpThreadArrangement::kStrided
>;
// Divide it into the number of warps, first partitioning the strided dimension then the
// contiguous.
static int const kWarpsStrided =
(WarpAccessIterations::kStrided >= kWarpCount
? kWarpCount
: WarpAccessIterations::kStrided);
static int const kWarpsContiguous =
(kWarpCount > WarpAccessIterations::kStrided
? kWarpCount / kWarpsStrided
: 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = layout::PitchLinearShape<
Detail::WarpThreadArrangement::kContiguous * kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided
>;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
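// The sketch below shows how the warp-raked map above resolves for one assumed
// configuration (a 64 x 32 tile, 128 threads, an 8 x 4 warp arrangement, and
// 8-element vectors); none of these values are mandated by CUTLASS:
//
//   using ExampleMap = PitchLinearWarpRakedThreadMap<
//       layout::PitchLinearShape<64, 32>, 128,
//       layout::PitchLinearShape<8, 4>, 8>;
//
//   // ShapeInAccesses = <8, 32> and WarpAccessIterations = <1, 8>, so the four
//   // warps stack along the strided dimension (WarpArrangement = <1, 4>) and
//   // each warp rakes a contiguous band of 8 rows; Iterations = <1, 2>,
//   // Delta = <64, 4>, and e.g. thread 37 (warp 1, lane 5) starts at (40, 8).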
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-raked arrangement in which a shape is partitioned into contiguous
/// elements. Warps are arranged based on a stride.
///
/// This ThreadMap is used by tensor core kernels for NCxHWx layout.
template <
typename Shape_,
int Threads,
typename WarpThreadArrangement_,
int ElementsPerAccess = 1
>
struct PitchLinearStridedWarpRakedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
using WarpThreadArrangement = WarpThreadArrangement_;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Base ThreadMap
using BaseThreadMap = PitchLinearWarpRakedThreadMap<
Shape,
kThreads,
WarpThreadArrangement,
kElementsPerAccess
>;
/// Shape of access by each thread
using ThreadAccessShape = typename BaseThreadMap::ThreadAccessShape;
struct Detail {
using WarpThreadArrangement = WarpThreadArrangement_;
using WarpAccessIterations = typename BaseThreadMap::Detail::WarpAccessIterations;
static int const kWarpSize = BaseThreadMap::Detail::kWarpSize;
static int const kWarpCount = BaseThreadMap::Detail::kWarpCount;
using ShapeInAccesses = typename BaseThreadMap::Detail::ShapeInAccesses;
// Divide it into the number of warps, first partitioning the contiguous dimension then the
    // strided.
static int const kWarpsContiguous =
(WarpAccessIterations::kContiguous >= kWarpCount
? kWarpCount
: WarpAccessIterations::kContiguous);
static int const kWarpsStrided =
(kWarpCount > WarpAccessIterations::kContiguous
? kWarpCount / kWarpsContiguous
: 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = typename BaseThreadMap::Delta;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
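// Sketch of the contiguous-first partitioning above, using assumed values
// (a 128 x 16 tile, 128 threads, a 4 x 8 warp arrangement, 4-element vectors):
//
//   using ExampleMap = PitchLinearStridedWarpRakedThreadMap<
//       layout::PitchLinearShape<128, 16>, 128,
//       layout::PitchLinearShape<4, 8>, 4>;
//
//   // WarpAccessIterations = <8, 2>, so the four warps line up along the
//   // contiguous dimension (WarpArrangement = <4, 1>) rather than the strided
//   // one; Iterations = <2, 2>, Delta = <16, 8>, and e.g. thread 37 (warp 1,
//   // lane 5) starts at element (36, 1).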
////////////////////////////////////////////////////////////////////////////////
/// Transpose the existing ThreadMap. For example, interleaved layout is like
/// congruous in the global memory and crosswise in the shared memory. We need
/// to transpose the coordinates between two.
template <typename ThreadMap_, typename WarpThreadArrangement_>
struct TransposePitchLinearThreadMap {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement =
layout::PitchLinearShape<ThreadMap::Detail::kWarpsStrided,
ThreadMap::Detail::kWarpsContiguous>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kContiguous == 1,
"Contiguous iteration has to be one to reuse the same shared store function with those that don't need transpose");
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<Detail::WarpThreadArrangement::kContiguous *
kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided>;
/// Maps thread ID to a coordinate offset within the tensor's logical
  /// coordinate space. Note that this is slightly different from that of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access
// (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided};
// This is the offset of a specific warp (in units of vectors)
// Note the order of / and %. Also the 2nd operand is kStrided.
layout::PitchLinearCoord warp_offset{
(warp_id / Detail::WarpArrangement::kStrided),
(warp_id % Detail::WarpArrangement::kStrided)};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous};
// This is the offset of a thread within a threadblock tile (units of
// vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of
// elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()};
return thread_offset_in_threadblock_tile_base;
}
};
template <typename ThreadMap_>
struct TransposePitchLinearThreadMapSimt {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static_assert(kElementsPerAccess == 1 , "Simt transpose requires elements per access to be 1");
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
static_assert(Iterations::kStrided == 1,
"Strided iteration has to be one to reuse the same shared store function with those that don't need transpose");
/// Shape of access by each thread
using ThreadAccessShape = typename ThreadMap::ThreadAccessShape;
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<ThreadMap::Delta::kStrided,
ThreadMap::Delta::kContiguous>;
/// Maps thread ID to a coordinate offset within the tensor's logical
  /// coordinate space. Note that this is slightly different from that of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
TensorCoord coord = ThreadMap::initial_offset(thread_id);
return TensorCoord(
coord.strided(),
coord.contiguous()
);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-striped arrangement. This partitions a tile into vectorized memory
/// accesses performed by each warp then distributes warps across them. Warps are striped in the
/// strided dimension and raked across the contiguous dimension.
template <
typename Shape_, /// Overall shape to partition in units of elements
  int Threads,                         /// Number of participating threads
typename WarpThreadArrangement_, /// Describes the shape of one memory access per warp
int ElementsPerAccess = 1 /// Number of elements accessed by each thread per memory operation (i.e. vector size)
>
struct PitchLinearWarpStripedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(
!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Compute the 'shape' of the overall tile in units of vectors
using ShapeInAccesses = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
// compute number of warp-level accesses total
using WarpAccessIterations = layout::PitchLinearShape<
ShapeInAccesses::kContiguous / WarpThreadArrangement::kContiguous,
ShapeInAccesses::kStrided / WarpThreadArrangement::kStrided
>;
// Divide it into the number of warps, first partitioning the strided dimension then the
// contiguous.
static int const kWarpsStrided =
(WarpAccessIterations::kStrided >= kWarpCount
? kWarpCount : (kWarpCount / WarpAccessIterations::kStrided));
static int const kWarpsContiguous =
(kWarpCount > WarpAccessIterations::kStrided ?
WarpAccessIterations::kContiguous / kWarpsStrided : 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = layout::PitchLinearShape<
Detail::WarpThreadArrangement::kContiguous * kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided * Detail::WarpArrangement::kStrided
>;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
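// Sketch contrasting the warp-striped map above with the warp-raked map earlier
// in this file, using the same assumed configuration (64 x 32 tile, 128 threads,
// 8 x 4 warp arrangement, 8-element vectors):
//
//   using ExampleMap = PitchLinearWarpStripedThreadMap<
//       layout::PitchLinearShape<64, 32>, 128,
//       layout::PitchLinearShape<8, 4>, 8>;
//
//   // Iterations is still <1, 2>, but Delta = <64, 16>: warp 0 covers rows
//   // 0-3 and 16-19, warp 1 covers rows 4-7 and 20-23, and so on, i.e. the
//   // warps are striped through the strided dimension instead of each owning
//   // one contiguous band of rows.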
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Strip-mines a pitch-linear tile among a given number of threads, first along the contiguous
/// dimension then along the strided dimension, while each thread accesses a 2D thread-tile.
///
/// The tile must be divisible by the thread count such that all threads may execute the same
/// number of iterations with the same delta to exhaustively cover the tile.
///
/// This class satisfies the "RegularThreadMapping" concept.
template <
typename Shape_,
int Threads,
typename ThreadTileShape
>
struct PitchLinear2DThreadTileStripminedThreadMap;
template <
typename Shape_,
int Threads
>
struct PitchLinear2DThreadTileStripminedThreadMap <Shape_, Threads, cutlass::layout::PitchLinearShape<4, 4>>{
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Access Shape of each thread
using ThreadAccessShape = cutlass::layout::PitchLinearShape<4, 4>;
//using ThreadAccessShape = ThreadTileShape;
/// Number of threads total
static int const kThreads = Threads;
/// Extract length of each access from Layout
static int const kElementsPerAccess = ThreadAccessShape::kContiguous;
  static_assert(!(kElementsPerAccess % 4), "kElementsPerAccess needs to be a multiple of 4 (32 bits)");
/// Internal implementation details
struct Detail {
    static_assert(!(ThreadAccessShape::kContiguous % 4), "ThreadAccessShape::kContiguous needs to be a multiple of 4");
static_assert(!(Shape::kContiguous % ThreadAccessShape::kContiguous), "");
static_assert(!((Shape::kContiguous * Shape::kStrided) % (kThreads * ThreadAccessShape::kCount)),
"Shape must be divisible thread count * accesses per thread.");
/// Shape of the tile in units of vectors
using ShapeVec = layout::PitchLinearShape<
Shape::kContiguous / ThreadAccessShape::kContiguous,
Shape::kStrided / ThreadAccessShape::kStrided
>;
static_assert(
(Threads < ShapeVec::kContiguous && !(ShapeVec::kContiguous % kThreads)) ||
(!(kThreads % ShapeVec::kContiguous) && !(ShapeVec::kStrided % (kThreads / ShapeVec::kContiguous))),
"Shape must be divisible by number of iterations of each thread."
);
};
/// Number of iterations by each thread
using Iterations = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
// Redo the comparison here to work around divide by zero compiler
          // error. The compiler evaluates both paths of platform::conditional.
(Threads >= Detail::ShapeVec::kContiguous
? Detail::ShapeVec::kStrided /
(kThreads / Detail::ShapeVec::kContiguous)
: 0)>,
layout::PitchLinearShape<Detail::ShapeVec::kContiguous / kThreads,
Detail::ShapeVec::kStrided>>::type;
/// Interval between accesses along each dimension of the tensor's logical coordinate space
/// (in units of Elements)
using Delta = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
Shape::kContiguous,
kThreads * ThreadAccessShape::kStrided / Detail::ShapeVec::kContiguous
>,
layout::PitchLinearShape<
kThreads * ThreadAccessShape::kContiguous,
1
>
>::type;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
/// (in units of Elements)
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
return TensorCoord(
(thread_id % Detail::ShapeVec::kContiguous) * ThreadAccessShape::kContiguous,
(thread_id / Detail::ShapeVec::kContiguous) * ThreadAccessShape::kStrided);
}
};
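// Sketch of the 4x4 thread-tile specialization above, with an assumed
// 64 x 32 tile and 32 threads:
//
//   using ExampleMap = PitchLinear2DThreadTileStripminedThreadMap<
//       layout::PitchLinearShape<64, 32>, 32,
//       layout::PitchLinearShape<4, 4>>;
//
//   // ShapeVec = <16, 8> thread-tiles, Iterations = <1, 4>, Delta = <64, 8>,
//   // and initial_offset(t) = ((t % 16) * 4, (t / 16) * 4); e.g. thread 17
//   // owns the 4x4 tile whose upper-left element is (4, 4).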
/// Thread map that presents a 2D thread-tiled mapping as a transposed PitchLinear2DThreadTile mapping
template <typename ThreadMap_>
struct TransposePitchLinearThreadMap2DThreadTile {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static_assert(kElementsPerAccess > 1, "2D thread-tile transpose requires more than one element per access");
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
/// Shape of access by each thread
using ThreadAccessShape = typename ThreadMap::ThreadAccessShape;
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<ThreadMap::Delta::kStrided,
ThreadMap::Delta::kContiguous>;
/// Maps thread ID to a coordinate offset within the tensor's logical
  /// coordinate space. Note that this is slightly different from that of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
TensorCoord coord = ThreadMap::initial_offset(thread_id);
return TensorCoord(
coord.strided(),
coord.contiguous()
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 33,392 | C | 35.022654 | 130 | 0.68765 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/warp/vector_fragment_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of a warp vector
that participate in one warp-level mma operation.
    Typically, this is used to access the scale/bias fragment of a warp-level mma operation.
    The scale/bias vector is then partitioned into smaller fragments that can be fed into
    the next warp-level mma operation.
This iterator is necessary to accomplish warp-level mma fusion where the scale/bias vector is
applied to the multiplicand for the next mma.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace transform {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the input fragment tile shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator;
// Partial specialization for PitchLinear layout tile
template <
/// Size of the input fragment vector shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator<Shape_, Element_,
cutlass::layout::PitchLinear,
InstructionShape_, ElementsPerAccess> {
public:
/// Size of the input threadblock tile shape (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::PitchLinear;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Number of participating threads
static int const kThreads = 32;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kRowsPerIteration = 8;
static int const kColumnsPerAccess = 8;
static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kK / kThreads;
static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess;
/// Number of iterations
using Iterations = MatrixShape<InstructionShape::kM / kRowsPerIteration, Shape::kContiguous / kElementsPerIteration>;
public:
//
// Derived quantities
//
// All fragments have kElementsPerAccess scale followed by bias
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one iteration of the iterator.
using Fragment = Array<Element, kElementsPerIteration * Iterations::kRow>;
/// Input threadblock fragment tile
using ThreadblockFragment = Array<Element, Shape::kContiguous >;
private:
/// Internal access type
using AccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Input threadblock fragment tile
AccessType const *iterator_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
VectorFragmentIterator(ThreadblockFragment const &threadblock_frag)
: iterator_(reinterpret_cast<AccessType const *>(&threadblock_frag)),
index_(0) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(index_ >= Iterations::kColumn)
index_ = 0;
}
/// Increments
CUTLASS_HOST_DEVICE
VectorFragmentIterator &operator++() {
add_offset(1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
index_ = idx;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int r = 0; r < Iterations::kRow; r++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kAccessPerIteration; i++) {
frag_ptr[i * Iterations::kRow + r].clear();
frag_ptr[i * Iterations::kRow + r] = iterator_[index_ * kAccessPerIteration + i];
}
}
}
};
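// Usage sketch for the iterator above. The instruction shape, the element
// types, and the surrounding mainloop state (threadblock_frag) are placeholders
// assumed for illustration only:
//
//   // With an instruction shape of 16x8x16 and 32 threads,
//   // kElementsPerIteration = 8 * 16 / 32 = 4 scale/bias elements per thread.
//   VectorFragmentIterator<Shape, Element, layout::PitchLinear,
//                          InstructionShape, kElementsPerAccess>
//       frag_iter(threadblock_frag);
//   Fragment frag;
//   frag_iter.load(frag);   // fragment feeding the current warp-level mma
//   ++frag_iter;            // advance to the fragment for the next mma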
// Partial specialization for Row-Major layout tile
template <
/// Size of the input fragment tile shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator<Shape_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, ElementsPerAccess> {
public:
/// Size of the input threadblock tile shape (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Underlying iterator
using Base = VectorFragmentIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, InstructionShape, ElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one iteration of the iterator.
using Fragment = typename Base::Fragment;
/// Input threadblock fragment tile
using ThreadblockFragment = typename Base::ThreadblockFragment;
private:
/// Underlying iterator
Base iterator_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
VectorFragmentIterator(ThreadblockFragment const &threadblock_frag)
: iterator_(threadblock_frag) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
iterator_.add_offset(index_offset);
}
/// Increments
CUTLASS_HOST_DEVICE
VectorFragmentIterator &operator++() {
add_offset(1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
iterator_.set_index(idx);
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 8,828 | C | 30.088028 | 119 | 0.67014 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/thread/unary_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
namespace cutlass {
namespace transform {
namespace thread {
namespace UnaryTransform {
struct Identity; ///< None (i.e., identity)
struct Conjugate; ///< Complex conjugate
}
/// Element-wise unary operator that transforms one element of a fragment at a time
template<
typename FragmentIn, ///< Input Fragment
typename FragmentOut,///< Output Fragment
typename Transform> ///< Unary transform operator
class UnaryOp
{
public:
CUTLASS_DEVICE
static FragmentOut execute(FragmentIn &in)
{
static_assert(FragmentIn::kElements == FragmentOut::kElements, "Number of elements must match.");
static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value ||
platform::is_same<Transform, UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
FragmentOut out;
if (platform::is_same<Transform, UnaryTransform::Identity>::value )
{
CUTLASS_PRAGMA_UNROLL
for (int i=0; i < FragmentIn::kElements; ++i){
out[i] = static_cast<typename FragmentOut::Element>(in[i]);
}
}
else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value )
{
for (int i=0; i < FragmentIn::kElements; ++i){
out[i] = conj(static_cast<typename FragmentOut::Element>(in[i]));
}
}
return out;
}
};
template<typename FragmentIn, typename Transform>
class UnaryOp<FragmentIn, FragmentIn, Transform>
{
public:
CUTLASS_DEVICE
static FragmentIn execute(FragmentIn &in)
{
static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value ||
platform::is_same<Transform, UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
if (platform::is_same<Transform, UnaryTransform::Identity>::value )
{
return in;
}
else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value )
{
for(int i=0; i < FragmentIn::kElements; ++i){
in[i] = conj(in[i]);
}
}
return in;
}
};
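// Illustrative sketch of the operator above; the fragment width of eight
// complex<float> elements is an assumed value:
//
//   using Fragment = Array<complex<float>, 8>;
//   using Conj = UnaryOp<Fragment, Fragment, UnaryTransform::Conjugate>;
//   Fragment frag;
//   // ... fill frag ...
//   Fragment out = Conj::execute(frag);  // picks the in-place specialization,
//                                        // conjugating frag and returning it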
} // namespace thread
} // namespace transform
} // namespace cutlass
| 4,309 | C | 39.660377 | 109 | 0.613832 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/thread/transpose.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic copy routines for tensor views
*/
#pragma once
namespace cutlass {
namespace transform {
namespace thread {
/// Transforms a fragment by doing a transpose
template <
int ElementCount,
typename TransposeShape,
typename Element
> struct Transpose;
/// Specialization for int8_t 4x4 transpose
template <int ElementCount_>
struct Transpose<ElementCount_, layout::PitchLinearShape<4,4> , int8_t> {
static const int kElementCount = ElementCount_;
using TransposeShape = layout::PitchLinearShape<4,4>;
using Element = int8_t;
using Fragment = cutlass::Array<Element, kElementCount>;
  static_assert(!(kElementCount % TransposeShape::kCount), "Shape needs to be a multiple of 16 elements to do a 4x4 transpose");
CUTLASS_DEVICE
void transform(Fragment& dst, Fragment& src) {
// Expose src/dst as int arrays.
int* src_int = reinterpret_cast<int*>(&src);
int* dst_int = reinterpret_cast<int*>(&dst);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementCount / TransposeShape::kCount; i++){
int const i0 = 4 * i + 0;
int const i1 = 4 * i + 1;
int const i2 = 4 * i + 2;
int const i3 = 4 * i + 3;
int a0 = src_int[i0];
int a1 = src_int[i1];
int a2 = src_int[i2];
int a3 = src_int[i3];
int b0, b1, b2, b3, c0;
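      // Each pair of __byte_perm calls below gathers byte j of rows a0..a3
      // into one 32-bit word, so column j of the 4x4 tile becomes row j of
      // the transposed tile.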
b0 = __byte_perm(a0, a1, 0x0040);
c0 = __byte_perm(a2, a3, 0x0040);
b0 = __byte_perm(b0, c0, 0x5410);
b1 = __byte_perm(a0, a1, 0x0051);
c0 = __byte_perm(a2, a3, 0x0051);
b1 = __byte_perm(b1, c0, 0x5410);
b2 = __byte_perm(a0, a1, 0x0062);
c0 = __byte_perm(a2, a3, 0x0062);
b2 = __byte_perm(b2, c0, 0x5410);
b3 = __byte_perm(a0, a1, 0x0073);
c0 = __byte_perm(a2, a3, 0x0073);
b3 = __byte_perm(b3, c0, 0x5410);
dst_int[i0] = b0;
dst_int[i1] = b1;
dst_int[i2] = b2;
dst_int[i3] = b3;
}
}
};
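// Usage sketch for the specialization above; the 32-element fragment (two 4x4
// int8 tiles) is an assumed size, and transform() is device-only code because
// it relies on __byte_perm:
//
//   using Transpose4x4 = Transpose<32, layout::PitchLinearShape<4, 4>, int8_t>;
//   Transpose4x4::Fragment src;   // two 4x4 int8 tiles, stored row after row
//   Transpose4x4::Fragment dst;
//   Transpose4x4{}.transform(dst, src);  // each 4x4 tile transposed in registers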
} // namespace thread
} // namespace transform
} // namespace cutlass
| 3,835 | C | 34.518518 | 128 | 0.641982 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of tiles
from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last
"residue" tile first, with the objective of minimizing predicate mask updates
during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIterator2dThreadTile
///
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap, typename AccessType>
class PredicatedTileAccessIterator2dThreadTile;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
  /// Number of bytes and 32b words of predicate storage
static int const kPredicateByteCount = (ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kStrided + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
/// Uses a non-template class
struct Params : PredicatedTileAccessIteratorParams {
public:
friend PredicatedTileAccessIterator2dThreadTile;
using Base = PredicatedTileAccessIteratorParams;
// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) :
Base(layout.stride(0),
MakePredicatedTileAccessIteratorDesc<Shape, Element, Layout, kAdvanceRank, ThreadMap>()()
) { }
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Index of residue tile
int residue_tile_idx_;
/// Used for out-of-order visitation
bool is_residue_tile_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
/// Tracks iterations within the thread loop
int iteration_thread_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_HOST_DEVICE
void compute_predicates_(
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++) {
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous,
ts + s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
if (is_steady_state) {
if (kAdvanceRank == 0) {
guard = (coord.strided() < extent_.strided());
} else {
guard = (coord.contiguous() < extent_.contiguous());
}
} else {
guard = (coord.strided() < extent_.strided() &&
coord.contiguous() < extent_.contiguous());
}
int pred_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
}
}
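  // Worked example of the packing above: four guard bits occupy the low bits
  // of each byte, so one 32b word holds 16 predicates. For pred_idx = 37,
  // word_idx = 2, byte_idx = 1 and bit_idx = 1, so the guard lands in bit
  // position 9 of predicates_[2] (assuming a thread map with more than 32
  // guarded accesses).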
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residue_tile_(true) {
TensorCoord residue_offset;
if (kAdvanceRank) {
residue_tile_idx_ =
(extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) /
Shape::kStrided;
residue_offset = make_Coord(0, residue_tile_idx_ * Shape::kStrided);
} else {
residue_tile_idx_ =
(extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) /
Shape::kContiguous;
residue_offset = make_Coord(residue_tile_idx_ * Shape::kContiguous, 0);
}
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + residue_offset +
ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(false);
set_iteration_index(0);
}
/// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
int residual = index % (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided);
iteration_strided_ = index / (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided);
iteration_contiguous_ = residual / ThreadMap::ThreadAccessShape::kStrided;
iteration_thread_ = residual % ThreadMap::ThreadAccessShape::kStrided;
}
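  // Illustrative decomposition with assumed thread-map values: for
  // ThreadMap::Iterations::kContiguous == 2 and
  // ThreadMap::ThreadAccessShape::kStrided == 4 the period is 2 * 4 = 8,
  // so index 13 maps to
  //
  //   iteration_strided_    = 13 / 8       = 1
  //   iteration_contiguous_ = (13 % 8) / 4 = 1
  //   iteration_thread_     = (13 % 8) % 4 = 1
  //
  // which is the inverse of the pred_idx formula used by
  // compute_predicates_() and valid().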
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += int(sizeof(Element)) * pointer_offset;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
if (is_residue_tile_) {
TensorCoord residue_offset;
if (kAdvanceRank) {
residue_offset = TensorCoord(0, residue_tile_idx_ * Shape::kStrided);
} else {
residue_offset = TensorCoord(residue_tile_idx_ * Shape::kContiguous, 0);
}
thread_offset_ -= residue_offset;
Layout layout(params_.stride_);
add_pointer_offset(-layout(residue_offset));
compute_predicates_(true);
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * (tile_offset.strided() - 1);
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * (tile_offset.contiguous() - 1);
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * tile_offset.strided();
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * tile_offset.contiguous();
pointer_ += Shape::kStrided * tile_offset.strided();
}
}
is_residue_tile_ = false;
}
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *ret_val = reinterpret_cast<AccessType *>(
pointer_ + (iteration_thread_ * params_.stride_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous) * int(sizeof(Element)));
return ret_val;
}
  /// Increments the iterator and returns a reference to it.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile &operator++() {
iteration_thread_++;
if (iteration_thread_ < ThreadMap::ThreadAccessShape::kStrided)
return *this;
iteration_thread_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
  /// Increments the iterator and returns a copy of its state prior to the increment.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile operator++(int) {
PredicatedTileAccessIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
  /// Enables the entire predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_thread_ +
iteration_contiguous_ * ThreadMap::ThreadAccessShape::kStrided +
iteration_strided_ * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
};
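// Illustrative usage sketch for the specialization above. The thread map
// (ThreadMap_), access type (AccessType_) and the loop bound kAccessesPerTile
// are assumptions made for this example only; they are not defined by this
// header.
//
//   using Iterator = PredicatedTileAccessIterator2dThreadTile<
//       cutlass::layout::PitchLinearShape<128, 32>, float,
//       cutlass::layout::PitchLinear, 1, ThreadMap_, AccessType_>;
//
//   typename Iterator::Params params(layout);   // host-constructible
//   Iterator iter(params, pointer, extent, thread_id, threadblock_offset);
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int idx = 0; idx < kAccessesPerTile; ++idx, ++iter) {
//     if (iter.valid()) {
//       AccessType_ const *src = iter.get();
//       // ... issue the guarded access ...
//     }
//   }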
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator2dThreadTile for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
    /// Construct the Params object given a column-major tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){}
    /// Construct the Params object from the base of the underlying pitch-linear iterator's Params
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile operator++(int) {
PredicatedTileAccessIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the entire predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
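// A minimal sketch of the coordinate mapping performed by the adapter above,
// with an assumed extent chosen purely for illustration: a column-major
// extent of 96 rows by 64 columns is forwarded to the underlying iterator as
//
//   layout::PitchLinearCoord(extent.row(), extent.column());   // (96, 64)
//
// that is, contiguous = rows and strided = columns; the row-major
// specialization below swaps the two and would pass PitchLinearCoord(64, 96).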
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator2dThreadTile for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::RowMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
    /// Construct the Params object given a row-major tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){}
    /// Construct the Params object from the base of the underlying pitch-linear iterator's Params
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile operator++(int) {
PredicatedTileAccessIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the entire predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 28,232 | C | 32.811976 | 160 | 0.659039 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Templates that calculate the addresses and predicates for loading tiles
         from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last
"residue" tile first, with the objective of minimizing predicate mask updates
during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIteratorTriangularMatrix
///
template <typename Shape, typename Element, typename Layout,
int AdvanceRank, typename ThreadMap,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType>
class PredicatedTileAccessIteratorTriangularMatrix;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, SideMode kSideMode, FillMode kFillMode, DiagType kDiagType, typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, kSideMode, kFillMode, kDiagType, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
using CompareOp = typename TrMatrixCompareOp<kFillMode, kDiagType>::Type;
static_assert( kFillMode == FillMode::kFull ||
((kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) && AccessType::kElements == 1),
"BLAS3 iterator for the triangular/symmetric matrix must use AccessType::kElements as 1");
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
/// Number of 32b words containing predicates
static int const kPredicateByteCount =
(kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIteratorTriangularMatrix;
private:
/// stride of pitch-linear layout (units of Element)
StrideIndex stride_;
/// (true) pitch-linear layout is mapped to row-major matrix
/// (false) pitch-linear layout is mapped to column-major matrix
bool is_row_major_;
    /// For vectorized accesses that cross the diagonal boundary, the guard
    /// condition is checked for the element on the boundary
int access_diagonal_boundary_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0), is_row_major_(false), access_diagonal_boundary_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout, bool is_row_major, int access_diagonal_boundary) :
stride_(layout.stride(0)), is_row_major_(is_row_major), access_diagonal_boundary_(access_diagonal_boundary) {
inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) *
ThreadMap::Delta::kStrided * LongIndex(stride_) *
sizeof_bits<Element>::value / 8;
};
};
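  // Worked example of the increments computed by Params above, under assumed
  // numbers (none of these values come from this header): Element = float
  // (4 bytes), stride_ = 128, ThreadMap::Delta::kStrided = 8,
  // ThreadMap::Iterations::kStrided = 4, Shape::kStrided = 32 and
  // kAdvanceRank = 1 give
  //
  //   inc_strided_ = 128 * 8 * 4                    = 4096 bytes
  //   inc_advance_ = 32 * 128 * 4                   = 16384 bytes
  //   inc_next_    = 16384 - (4 - 1) * 8 * 128 * 4  = 4096 bytes
  //
  // so operator++ steps 4096 bytes between strided accesses, and inc_next_
  // moves the pointer from the last access of one tile to the first access
  // of the next.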
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
  /// Tracks which global memory accesses fall on the diagonal,
  /// so the imaginary part of diagonal elements of Hermitian matrices can be ignored
uint32_t predicates_onDiag_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
predicates_onDiag_[i] = 0u;
}
CompareOp compare_op;
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
bool onDiag = false;
guard = ((coord.strided() < extent.strided()) &&
(coord.contiguous() < extent.contiguous()));
      // guard accesses on the wrong side of the triangular matrix diagonal
if (kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) {
coord += TensorCoord{params_.access_diagonal_boundary_, 0};
bool triagular_guard_row_major = compare_op(coord.strided(), coord.contiguous()) | !params_.is_row_major_;
bool triagular_guard_col_major = compare_op(coord.contiguous(), coord.strided()) | params_.is_row_major_;
guard = guard && triagular_guard_row_major && triagular_guard_col_major;
if (kDiagType == DiagType::kUnit) {
onDiag = (guard && coord.strided() == coord.contiguous()) ? true : false;
}
}
int pred_idx_onDiag = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx_onDiag = pred_idx_onDiag / kPredicatesPerWord;
int residual_onDiag = pred_idx_onDiag % kPredicatesPerWord;
int byte_idx_onDiag = residual_onDiag / kPredicatesPerByte;
int bit_idx_onDiag = residual_onDiag % kPredicatesPerByte;
predicates_onDiag_[word_idx_onDiag] |= (unsigned(onDiag) << (byte_idx_onDiag * 8 + bit_idx_onDiag));
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
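  // Illustrative check of the triangular guard above for a row-major mapping
  // (is_row_major_ == true) with FillMode::kLower and a non-unit diagonal,
  // assuming the resolved CompareOp behaves like greater-or-equal (an
  // assumption about TrMatrixCompareOp, not something stated in this header):
  //
  //   coord = (contiguous = column = 3, strided = row = 5)
  //     -> compare_op(5, 3) holds, so the access is kept;
  //   coord = (contiguous = column = 7, strided = row = 2)
  //     -> compare_op(2, 7) fails, so the access is masked off.
  //
  // The column-major branch applies the mirrored comparison, so either
  // mapping keeps only the lower triangle.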
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(const_cast<NonConstPointer>(pointer))),
extent_(extent) {
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(extent_);
set_iteration_index(0);
}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
      /// ID of each participating thread
int thread_id)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
thread_offset_ += TensorCoord{0, Shape::kStrided * tile_offset.strided()};
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
thread_offset_ += TensorCoord{Shape::kContiguous * tile_offset.contiguous(), 0};
}
compute_predicates_(extent_);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + iteration_vector_;
}
  /// Increments the iterator and returns a reference to it.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
  /// Increments the iterator and returns a copy of its state prior to the increment.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
  /// Enables the entire predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
  /// Returns whether the access is on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_onDiag_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
};
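// Illustrative usage sketch for the specialization above; the thread map and
// the concrete template arguments are assumptions for this example, not
// values prescribed by the header. Note that the static_assert above forces
// AccessType::kElements == 1 for the triangular fill modes.
//
//   using Iterator = PredicatedTileAccessIteratorTriangularMatrix<
//       cutlass::layout::PitchLinearShape<64, 32>, double,
//       cutlass::layout::PitchLinear, 1, ThreadMap_,
//       SideMode::kLeft, FillMode::kLower, DiagType::kNonUnit,
//       cutlass::Array<double, 1>>;
//
//   typename Iterator::Params params(layout, /*is_row_major=*/false,
//                                    /*access_diagonal_boundary=*/0);
//   Iterator iter(params, pointer, extent, thread_id, threadblock_offset);
//
//   if (iter.valid()) {
//     double const *src = reinterpret_cast<double const *>(iter.get());
//     bool on_diag = iter.getOnDiag();   // set only for DiagType::kUnit
//     // ... load, optionally ignoring the imaginary part on the diagonal ...
//   }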
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, kSideMode, kFillMode, kDiagType,
AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
kSideMode, kFillMode, kDiagType, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
static int const kAccessDiagonalBoundary =
(kFillMode == FillMode::kLower) ? (AccessType::kElements - 1) : 0;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
    /// Construct the Params object given a column-major tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0)), false, kAccessDiagonalBoundary){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the entire predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Returns whether the access is on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
return iterator_.getOnDiag();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
kSideMode, kFillMode, kDiagType, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
static int const kAccessDiagonalBoundary =
(kFillMode == FillMode::kUpper) ? (AccessType::kElements - 1) : 0;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
    /// Construct the Params object given a row-major tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0)), true, kAccessDiagonalBoundary){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the entire predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Returns whether the access is on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
return iterator_.getOnDiag();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 31,412 | C | 34.176932 | 129 | 0.669426 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Templates that compute the addresses used to store tiles
         into pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap,
bool Dynamic_iterations = false,
int Alignment =
              sizeof_bits<Element>::value * ThreadMap::kElementsPerAccess / 8
>
class RegularTileAccessIteratorDirectConv;
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations OFF
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_, false, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
    // No-op: this specialization takes its strided iteration count from ThreadMap at compile time
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
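  // Worked example of the offset computed above, with assumed numbers only:
  // for ThreadMap::kElementsPerAccess = 8 a tensor stride of 64 elements
  // gives stride_ = 64 / 8 = 8 AccessType units; with Delta::kStrided = 4 and
  // Delta::kContiguous = 32, the access at (iteration_strided_ = 2,
  // iteration_contiguous_ = 1) resolves to
  //
  //   access_offset = 2 * 4 * 8 + 1 * 32 / 8 = 64 + 4 = 68
  //
  // AccessType-sized steps from pointer_, before byte_offset_ is applied.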
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset in the unit of tile.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * ThreadMap::Iterations::kStrided *
ThreadMap::Delta::kStrided * stride_ * ThreadMap::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations ON
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::PitchLinear,
    AdvanceRank, ThreadMap_, true, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
  /// Total iterations in the strided dimension (a dynamic, run-time value)
int total_iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
total_iteration_strided_ = num;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < total_iteration_strided_) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset in the unit of tile.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * total_iteration_strided_ * ThreadMap::Delta::kStrided * stride_ *
ThreadMap::kElementsPerAccess);
}
};
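// A minimal usage sketch contrasting the two pitch-linear specializations
// above. Identifiers such as ThreadMap_, smem_ref, frag and
// runtime_strided_iterations are assumptions for the example: the
// Dynamic_iterations == true variant takes its strided iteration count at run
// time and must be given one (via set_iteration_num) before it is advanced,
// whereas the static variant ignores that call.
//
//   RegularTileAccessIteratorDirectConv<
//       cutlass::layout::PitchLinearShape<64, 16>, cutlass::half_t,
//       cutlass::layout::PitchLinear, 1, ThreadMap_, true>
//       iter(smem_ref, thread_id);
//
//   iter.set_iteration_num(runtime_strided_iterations);
//   iter.set_iteration_index(0);
//   *iter.get() = frag;   // one access; ++iter advances to the next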
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, bool Dynamic_iterations, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::ColumnMajor,
    AdvanceRank, ThreadMap_, Dynamic_iterations, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIteratorDirectConv<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_,
Dynamic_iterations>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
  /// Overrides the total number of strided iterations
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
iterator_.set_iteration_num(num);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, bool Dynamic_iterations, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::RowMajor,
AdvanceRank, ThreadMap_, Dynamic_iterations, Alignment> {
public:
static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may advance along either the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIteratorDirectConv<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_,
Dynamic_iterations>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
  /// Overrides the total number of strided iterations
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
iterator_.set_iteration_num(num);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 18,623 | C | 30.673469 | 106 | 0.66391 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_vector_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates for computing the addresses used to load small
        vectors from global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedVectorAccessIterator
///
template <
/// Shape of the vector accessed by the entire threadblock
typename Shape,
/// Shape of the vector accessed by the warp
typename WarpShape,
/// Type of Element
typename Element,
/// Layout of the vector
typename Layout,
/// Number of elements for each access
int ElementsPerAccess,
/// Support residual tile
bool EnableResidualAccess = false
>
class PredicatedVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Vector access iterator specialized for vectors, e.g. scale and bias
/// Thread arrangements are for TensorOps
///
template <
typename Shape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
bool EnableResidualAccess
>
class PredicatedVectorAccessIterator <
Shape_,
WarpShape_,
Element_,
layout::PitchLinear,
ElementsPerAccess,
EnableResidualAccess
> {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kThreads = 32;
static int const kRowsPerIteration = 8;
static int const kThreadsPerRow = kThreads / kRowsPerIteration;
static int const kThreadsPerRowMask = 0x3;
static int const kIterations = WarpShape::kContiguous / (kThreadsPerRow * kElementsPerAccess);
static int const kWarpCountStrided = Shape::kStrided / WarpShape::kStrided;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Extent of tensor
TensorCoord extent_;
/// pointer offset of each thread
TensorCoord thread_offset_;
/// iteration index
LongIndex iteration_;
/// residual access
bool is_residual_;
/// residual offset of each thread
TensorCoord residual_offset_;
public:
/// Constructs a vector access iterator
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
/// Pointer to the start of the vector
ConstPointer pointer,
/// Extent of vector
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// ID of each participating warp
int warp_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residual_(false) {
int warp_offset = (warp_id / kWarpCountStrided) * WarpShape::kContiguous;
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + TensorCoord(warp_offset, 0) +
TensorCoord((thread_id & kThreadsPerRowMask) * kElementsPerAccess, 0);
set_iteration_index(0);
if(EnableResidualAccess) {
// compute residual offset
typename TensorCoord::Index residual_size = extent_.contiguous() % WarpShape::kContiguous;
if (residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(residual_size, 0);
}
}
}
/// Construct a PredicatedVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
/// Pointer to start of vector
ConstPointer pointer,
/// Extent of vector
TensorCoord extent,
///< ID of each participating thread
int thread_id,
/// ID of each participating warp
int warp_id)
: PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_ = index;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ =
thread_offset_ +
TensorCoord(WarpShape::kContiguous * tile_offset.contiguous(), 0);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
((thread_offset_.contiguous() + iteration_ * kThreadsPerRow * kElementsPerAccess)
* sizeof_bits<Element>::value / 8));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator &operator++() {
++iteration_;
if(iteration_ >= kIterations)
iteration_ = 0;
return *this;
}
  /// Advances to the next tile; consumes the residual portion first when residual access is enabled.
CUTLASS_HOST_DEVICE
void advance() {
if(EnableResidualAccess && is_residual_) {
is_residual_ = false;
thread_offset_ += residual_offset_;
}
else
add_tile_offset(TensorCoord(1, 0));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator operator++(int) {
PredicatedVectorAccessIterator self(*this);
operator++();
return self;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return ((thread_offset_.contiguous() +
iteration_ * kThreadsPerRow * kElementsPerAccess) < extent_.contiguous());
}
};
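////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage sketch (not part of the library). `VecIterator` stands for an
// instantiation of the pitch-linear specialization above; `ptr`, `extent`, `lane_id`,
// `warp_id`, `tb_offset`, and `frag` are hypothetical names used only here.
//
//   VecIterator iter(ptr, extent, lane_id, warp_id, tb_offset);
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int i = 0; i < VecIterator::kIterations; ++i) {
//     if (iter.valid()) {
//       frag[i] = *iter.get();   // one AccessType of kElementsPerAccess elements
//     }
//     ++iter;                    // the iteration index wraps to 0 after kIterations
//   }
//
//   // Consumes the residual portion first when EnableResidualAccess is true;
//   // otherwise steps one WarpShape::kContiguous tile forward.
//   iter.advance();
//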
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedVectorAccessIterator for row-major data.
///
template <
typename Shape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
bool EnableResidualAccess
>
class PredicatedVectorAccessIterator<
Shape_,
WarpShape_,
Element_,
layout::RowMajor,
ElementsPerAccess,
EnableResidualAccess
> {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedVectorAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
Element,
layout::PitchLinear,
ElementsPerAccess,
EnableResidualAccess>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
static int const kRowsPerIteration = UnderlyingIterator::kRowsPerIteration;
static int const kThreads = UnderlyingIterator::kThreads;
static int const kIterations = UnderlyingIterator::kIterations;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
///< Pointer to the start of the vector
ConstPointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< ID of each participating warp
int warp_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(pointer, layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id, warp_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
ConstPointer pointer, ///< Pointer to the start of the vector
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
int warp_id ///< ID of each participating warp
)
: PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator operator++(int) {
PredicatedVectorAccessIterator self(*this);
operator++();
return self;
}
  /// Advances the underlying iterator to the next tile.
CUTLASS_HOST_DEVICE
void advance() {
iterator_.advance();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 13,088 | C | 30.313397 | 100 | 0.669927 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates calculating the addresses and predicates for loading scale and bias vectors.
       This iterator uses masks to guard out-of-bounds accesses.
       It can be used to load the gamma and beta vectors of layernorm, which vary from one main-loop iteration to the next.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorAccessIterator
///
template <typename ThreadblockShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for fprop pitch-linear data.
///
template <typename ThreadblockShape_, typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::PitchLinear> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Internal pointer to first access of tile
BytePointer pointer_;
TensorCoord thread_offset_;
int problem_size_k_;
/// Used for out-of-order visitation
bool is_residue_tile_;
bool guard_;
TensorCoord::Index residue_size_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Extent of tensor
int problem_size_k,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
problem_size_k_ = problem_size_k;
is_residue_tile_ = true;
residue_size_ = (problem_size_k_ - threadblock_offset.contiguous()) % ThreadblockShape::kContiguous;
if (residue_size_ == 0) {
residue_size_ = ThreadblockShape::kContiguous;
}
guard_ = ((thread_id - thread_base) * kElementsPerAccess) < residue_size_;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Extent of tensor
int problem_size_k,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(problem_size_k,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
guard_ = threadIdx.x < kThreads * 2;
TensorCoord offset = is_residue_tile_ ?
TensorCoord(residue_size_ + ThreadblockShape::kContiguous * (tile_offset.contiguous() - 1), 0)
: TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0);
thread_offset_ =
thread_offset_ +
offset;
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
(thread_offset_.contiguous() * sizeof_bits<Element>::value / 8));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
return *this;
}
/// Increment and return an instance to self.
CUTLASS_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
guard_ &= (!enable);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return guard_;
}
};
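////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage sketch (not part of the library). Threads [0, kThreads) of the
// threadblock read the scale (gamma) vector while threads [kThreads, 2 * kThreads)
// read the bias (beta) vector. `ScaleBiasIterator`, `gamma`, `beta`, `tb_offset`,
// `tiles_k`, and `frag` are hypothetical names used only here.
//
//   ScaleBiasIterator iter(problem_size_k, gamma, beta, threadIdx.x, tb_offset);
//
//   for (int k_tile = 0; k_tile < tiles_k; ++k_tile) {
//     if (iter.valid()) {
//       frag = *iter.get();           // one 128-bit AccessType per thread
//     }
//     iter.add_tile_offset({1, 0});   // the residue tile is consumed by the first call
//   }
//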
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename ThreadblockShape_,
typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::RowMajor> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator<
layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Extent of tensor
int problem_size_k,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(problem_size_k, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
int problem_size_k, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(problem_size_k,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 12,890 | C | 33.284574 | 104 | 0.652444 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_triangular_matrix.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/arch/memory.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileIteratorTriangularMatrix
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//     Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::PredicatedTileIteratorTriangularMatrix;
//
// typename Iterator::Params params(view.layout());
//
//     kernel<Iterator>(params, view.data(), view.extent());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
SideMode kSideMode,
FillMode kFillMode,
DiagType kDiagType,
int AccessSize = ThreadMap::kElementsPerAccess
>
class PredicatedTileIteratorTriangularMatrix;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIteratorTriangularMatrix for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
int AccessSize>
class PredicatedTileIteratorTriangularMatrix<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType,
AccessSize> {
public:
static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may advance along either the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
using AccessType = AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
PredicatedTileAccessIteratorTriangularMatrix<Shape, Element, Layout, kAdvanceRank,
ThreadMap, kSideMode, kFillMode, kDiagType, AccessType>;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileIteratorTriangularMatrix;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix operator++(int) {
PredicatedTileIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, address_iterator_.valid());
++address_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_byte_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
if (address_iterator_.valid()) {
*access_ptr = frag_ptr[idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_byte_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIteratorTriangularMatrix for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
SideMode kSideMode,
FillMode kFillMode,
DiagType kDiagType,
int AccessSize
>
class PredicatedTileIteratorTriangularMatrix<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType,
AccessSize> {
public:
  static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                "Specialization for pitch-linear iterator may advance along either the "
                "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
kSideMode,
kFillMode,
kDiagType,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix operator++(int) {
PredicatedTileIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIteratorTriangularMatrix for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
SideMode kSideMode,
FillMode kFillMode,
DiagType kDiagType,
int AccessSize
>
class PredicatedTileIteratorTriangularMatrix<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType,
AccessSize> {
public:
  static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                "Specialization for pitch-linear iterator may advance along either the "
                "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
kSideMode,
kFillMode,
kDiagType,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
    }
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
/// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix operator++(int) {
PredicatedTileIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 28,064 | C | 33.267399 | 109 | 0.666334 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates for computing the addresses used when storing tiles
    from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may advance along either the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
  /// Adds a tile offset in units of whole tiles.
  /// In GEMM/Conv implementations, this is used to move along the k dimension in shared memory.
  /// The layouts below are the shared memory layouts. Current SM50 SIMT kernels only use col major A and row major B.
  ///   For a row major A operand, the k dimension is the contiguous dimension;
  ///   for a col major A operand, the k dimension is the strided dimension;
  ///   for a row major B operand, the k dimension is the strided dimension;
  ///   for a col major B operand, the k dimension is the contiguous dimension.
  /// The two specializations below map col/row major coordinates to the pitch-linear
  /// coordinates used by this base class.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ *
ThreadMap::kElementsPerAccess);
}
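  // Worked example: for a col-major A operand the k dimension is strided, so moving
  // one tile along k is add_tile_offset({0, 1}), which advances the pointer by
  // Shape::kStrided * stride_ * ThreadMap::kElementsPerAccess elements; for a
  // row-major A operand, where k is contiguous, the same move is add_tile_offset({1, 0})
  // and advances the pointer by Shape::kContiguous elements.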
};
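// Usage sketch (illustrative only; Shape, Element, ThreadMap, ref and fragment_access are
// placeholders for types and objects defined by the enclosing kernel, and only the members
// declared above are assumed):
//
//   using SmemIterator = RegularTileAccessIterator<Shape, Element, layout::PitchLinear, 1, ThreadMap>;
//   SmemIterator iter(ref, thread_id);
//   CUTLASS_PRAGMA_UNROLL
//   for (int i = 0; i < ThreadMap::Iterations::kStrided * ThreadMap::Iterations::kContiguous; ++i, ++iter) {
//     *iter.get() = fragment_access[i];   // or read through iter.get()
//   }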
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajor,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::RowMajor,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 13,283 | C | 31.479218 | 115 | 0.661823 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/ell_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Ell iterator for matrix of indices (ellColInd matrix)
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
namespace cutlass {
namespace transform {
namespace threadblock {
namespace ell{
constexpr unsigned int SmemPow = 8;
constexpr unsigned int SmemStages = 2;
constexpr unsigned int SmemSize = 1 << SmemPow;
constexpr unsigned int SmemMask = (SmemSize*SmemStages-1);
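// The ellColInd cache below is double buffered in shared memory: each stage holds
// SmemSize (1 << 8 = 256) indices, and SmemMask wraps offsets across both stages so
// one stage can be refilled by load_ell_indices() while the other is being consumed.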
class SharedStorage{
public:
Array<int, SmemSize*SmemStages> array;
};
class Iterator{
public:
using Layout = layout::PitchLinear;
using LongIndex = typename Layout::LongIndex;
private:
const int *gmem_col_idx_;
int *smem_col_idx_;
const int block_size_;
const int base_idx_;
const int k_shape_;
const int ell_increment_;
const int array_length_;
int col_idx_base_;
int residue_;
int counter_;
int pow2_;
int residue_shape_;
int smem_offset_;
int smem_stage_;
int gmem_offset_;
int lane_;
bool is_pow2_;
bool is_residue_tile_;
public:
CUTLASS_DEVICE
void load_ell_indices(){
for(int i=threadIdx.x; i<SmemSize; i+=blockDim.x){
int idx = (gmem_offset_+i < array_length_) ? gmem_offset_+i : array_length_-1;
int gmem_col_idx = gmem_col_idx_[idx] - base_idx_;
smem_col_idx_[i + smem_stage_ * SmemSize] =
(gmem_col_idx >= 0) ? gmem_col_idx : -1;
}
gmem_offset_ += SmemSize;
smem_stage_ ^= 1;
}
CUTLASS_DEVICE
Iterator(
SharedStorage& shared_storage_base,
const int* col_idx,
const int& block_size,
const int& base_idx,
const int k_shape,
const int& problem_size_k,
const int& ell_stride,
const int& thread_idx)
: residue_(0),
counter_(0),
smem_offset_(0),
smem_stage_(0),
gmem_offset_(0),
block_size_(block_size),
base_idx_(base_idx),
k_shape_(k_shape),
ell_increment_(ell_stride * block_size),
array_length_((problem_size_k + block_size_ - 1) / block_size_),
residue_shape_(problem_size_k % k_shape_),
is_residue_tile_(residue_shape_ != 0),
smem_col_idx_(reinterpret_cast<int*>(&shared_storage_base.array)),
gmem_col_idx_(const_cast<int*>(col_idx)),
lane_(thread_idx % 32) {
load_ell_indices();
__syncthreads();
is_pow2_ = ((block_size_ & (block_size_ - 1)) == 0);
if( is_pow2_ && k_shape <= block_size_ ) lane_ = 0;
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_;
pow2_ = 0;
while(block_size_ >> (pow2_ + 1)) ++pow2_;
}
CUTLASS_DEVICE
int get_blocksize(){
return block_size_;
}
CUTLASS_DEVICE
Iterator &operator++(){
if(is_residue_tile_){
residue_ += residue_shape_;
is_residue_tile_ = false;
} else {
residue_ += k_shape_;
}
if(residue_ < block_size_){
return *this;
}
if((array_length_ > SmemSize) && (((smem_offset_ >> SmemPow) & 1) != smem_stage_))
load_ell_indices();
if(residue_ == block_size_){
++smem_offset_;
counter_ += ell_increment_;
residue_ = 0;
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_;
return *this;
}
if(is_pow2_){
smem_offset_ += residue_ >> pow2_;
counter_ += (residue_ >> pow2_) * ell_increment_;
residue_ = residue_ & ((1 << pow2_) - 1);
}
else {
smem_offset_ += residue_ / block_size_;
counter_ += (residue_ / block_size_) * ell_increment_;
residue_ %= block_size_;
}
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_;
return *this;
}
CUTLASS_DEVICE
LongIndex get_offset(const int& idx) {
int num_jump_tiles;
if(is_pow2_)
num_jump_tiles = (idx + residue_) >> pow2_;
else
num_jump_tiles = (idx + residue_) / block_size_;
int tmp = __shfl_sync(0xffffffff, col_idx_base_, num_jump_tiles);
return tmp - num_jump_tiles * ell_increment_;
}
CUTLASS_DEVICE
LongIndex get_offset_fast() {
return col_idx_base_;
}
};
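// Usage sketch (illustrative only; the argument names are placeholders, and only the
// constructor and member signatures defined above are assumed). A Blocked-Ellpack
// mainloop would typically construct the iterator once per threadblock and query it
// for gathered offsets on every k-tile:
//
//   __shared__ ell::SharedStorage ell_storage;
//   ell::Iterator ell_iter(ell_storage, ell_col_ind_ptr, ell_blocksize, ell_base_idx,
//                          tile_shape_k, problem_size_k, ell_stride, threadIdx.x);
//   ...
//   auto gmem_offset = ell_iter.get_offset(k_within_tile);  // gathered offset for this access
//   ++ell_iter;                                             // advance to the next k-tile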
} // namespace ell
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 6,181 | C | 29.91 | 101 | 0.589063 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates implementing loading and storing of tiles from pitch-linear rank=2 tensors
    using a 2D thread-tiled thread mapping.
    These regular tile iterators assume in-bounds (unpredicated) accesses, typically into
    shared memory; integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "regular_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int Alignment = sizeof_bits<Element>::value * ThreadMap::kElementsPerAccess / 8
>
class RegularTileIterator2dThreadTile;
/// Regular tile iterator specialized for pitch-linear + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the contiguous or strided dimensions.");
private:
//
// Types
//
using AccessType = AlignedArray<Element, ThreadMap::ThreadAccessShape::kCount, kAlignment>;
//
// Data members
//
/// Pointer to memory
uint8_t *pointer_;
/// Stride quantity
StrideIndex stride_;
/// Amount to increment pointer along strided dimension
LongIndex increment_strided_;
/// Amount to advance pointer between tiles
LongIndex increment_advance_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx,
int interleave
){
TensorCoord t = ThreadMap::initial_offset(thread_idx);
long int offset = t[0] * interleave + t[1] * ref.stride()[0]/interleave;
pointer_ = reinterpret_cast<uint8_t *>(ref.data() + offset);
stride_ = ref.stride()[0] / interleave;
increment_strided_ = (ref.stride()[0] * sizeof_bits<Element>::value / 8) * ThreadMap::Delta::kStrided / interleave;
increment_advance_ =
(kAdvanceRank == 0 ?
Shape::kContiguous * sizeof_bits<Element>::value / 8 :
Shape::kStrided * (ref.stride()[0] * sizeof_bits<Element>::value / 8) / interleave);
}
/// Loads a fragment
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
load_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag);
uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided] = frag_ptr[idx];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
store_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
pointer_ += increment_advance_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
pointer_ -= increment_advance_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
int offset = sizeof_bits<Element>::value *
(coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8;
add_pointer_offset(offset);
}
};
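// Usage sketch (illustrative only; SmemIterator and smem_iterator stand for a concrete
// instantiation of the class above). The iterator typically stages fragments through
// shared memory:
//
//   typename SmemIterator::Fragment frag;   // filled elsewhere, e.g. by a global-memory iterator
//   smem_iterator.store(frag);              // write one tile using the 2D thread tiling
//   __syncthreads();
//   smem_iterator.load(frag);               // read it back with the same mapping
//   ++smem_iterator;                        // step one tile along the advance rank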
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::RowMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorInterleaved<4>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
using Underlying = RegularTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
kAlignment
>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile() { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx, 4) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
    iterator_.load(frag, {tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
    iterator_.store(frag, {tile_offset.column(), tile_offset.row()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorInterleaved<4>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
using PitchLinearThreadMap = PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
ThreadMap::kThreads, ThreadMap::ThreadAccessShape::kCount >;
using Underlying = RegularTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap
>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile() { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx, 4) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
    iterator_.load(frag, {tile_offset.row(), tile_offset.column()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
    iterator_.store(frag, {tile_offset.row(), tile_offset.column()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 15,486 | C | 29.366667 | 128 | 0.671251 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates that compute the addresses used to store tiles of
    pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
    /// This iterator is specialized for an access size that is 64 bits in
    /// length.
static int const kAccessSizeInBits = 64;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 64b");
///< Number of pointers
static int const kPointerCount = 1;
};
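  // For example, with a 64-bit element type such as double, the static_assert above
  // forces ThreadMap::kElementsPerAccess == 1, since
  // sizeof_bits<double>::value * 1 == 64 == Detail::kAccessSizeInBits.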
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous64b,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous64b,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
    /// This iterator is specialized for an access size that is 64 bits in
    /// length.
static int const kAccessSizeInBits = 64;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 64b");
///< Number of pointers - two pointers are needed if making more than 4 iterations along
///< strided dimension
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 4 ? 2 : 1);
};
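  // For example, a thread map with ThreadMap::Iterations::kStrided == 8 yields
  // kPointerCount == 2, which sizes the byte_offset_ array below and enables the
  // second, XOR-ed byte offset selected in get().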
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_[Detail::kPointerCount];
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / ThreadMap::kElementsPerAccess) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data());
byte_offset_[0] = ref.offset(thread_offset_in_threadblock_tile) * sizeof(Element);
if (Detail::kPointerCount == 2) {
byte_offset_[1] = byte_offset_[0] ^ 8;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset / ThreadMap::kElementsPerAccess;
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
// Map the logical contiguous and strided access to the internal swizzled structure.
int uniform_offset = (iteration_strided_ & 0x3) * stride_ + (iteration_strided_ >> 3) * 16 + stride_ * ThreadMap::Delta::kContiguous * iteration_contiguous_;
char *access_byte_ptr = reinterpret_cast<char *>(pointer_ + uniform_offset);
int byte_offset;
// This iterator may require two byte offsets if it must load more than 8 rows (or 2 iterations)
// in the strided dimension
if (Detail::kPointerCount == 2 && (iteration_strided_ & 0x4)) {
byte_offset = byte_offset_[1];
}
else {
byte_offset = byte_offset_[0];
}
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset);
}
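  // Worked example: assuming ThreadMap::Iterations::kStrided > 4 so that two byte
  // offsets are in use, iteration_strided_ == 5 with iteration_contiguous_ == 0 gives
  // uniform_offset == (5 & 0x3) * stride_ == stride_, and because bit 0x4 is set the
  // access uses byte_offset_[1] == byte_offset_[0] ^ 8.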
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.strided() * Shape::kStrided + coord.contiguous() * Shape::kContiguous * stride_);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicand64bCrosswise,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicand64bCrosswise,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128b");
///< Number of pointers
static int const kPointerCount = 1;
};
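  // For example, 64-bit elements satisfy the static_assert above with
  // ThreadMap::kElementsPerAccess == 2, and 32-bit elements with 4, since the
  // product must equal Detail::kAccessSizeInBits == 128.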
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous128b,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous128b,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128b");
///< Number of pointers
static int const kPointerCount = 1;
};
  static_assert(!(ThreadMap::Iterations::kStrided % 2), "This iterator requires an even number of iterations along the strided dimension");
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
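    // Swizzled addressing for the crosswise 128x4 arrangement: odd strided iterations
    // shift the contiguous offset by two accesses, every pair of strided iterations
    // advances eight accesses along the strided direction, and the contiguous offset
    // is scaled by stride_ before the two terms are combined (all in AccessType units,
    // since access_ptr is an AccessType pointer).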
int offset_c = (iteration_contiguous_ * ThreadMap::Delta::kContiguous + (iteration_strided_ & 1) * 2);
int offset_s = (iteration_strided_ / 2) * 8;
int access_offset = offset_c * stride_ + offset_s;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
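    // Unlike the congruous specialization above, the stride multiplies the contiguous
    // term here: crossing tiles along the contiguous dimension advances
    // Shape::kContiguous * stride_ elements, while crossing along the strided
    // dimension advances Shape::kStrided * Layout::kElementsPerAccess elements.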
add_pointer_offset(
coord.contiguous() * Shape::kContiguous * stride_ +
coord.strided() * Shape::kStrided * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCrosswise128x4,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCrosswise128x4,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 47,789 | C | 30.174168 | 161 | 0.668627 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Ell iterator for Blocked-Ell matrix (ellValue matrix) used with EllMmaMultistage
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// EllPredicatedTileAccessIterator
///
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap, typename AccessType>
class EllPredicatedTileAccessIterator;
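//
// Illustrative sketch (not part of the original header): the pitch-linear
// specialization below might be instantiated as
//
//   using Iterator = cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
//       cutlass::layout::PitchLinearShape<128, 32>,  // Shape of one threadblock tile
//       cutlass::half_t,                             // Element
//       cutlass::layout::PitchLinear,                // Layout
//       1,                                           // AdvanceRank (advance along strided)
//       ThreadMap, AccessType>;                      // supplied by the surrounding kernel
//
// where ThreadMap and AccessType are hypothetical placeholders for a pitch-linear
// thread map and a cutlass::Array<Element, N> access vector, respectively.
//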
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
  /// Number of bytes needed to store the predicates (four predicates per byte)
static int const kPredicateByteCount =
(kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
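  // Worked example (illustrative numbers): with ThreadMap::Iterations::kCount == 8 and
  // kAccessesPerVector == 1, kPredicateCount == 8, kPredicateByteCount == 2, and
  // kPredicateWordCount == 1, so all guard bits fit in a single 32b word.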
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend EllPredicatedTileAccessIterator;
private:
/// stride of pitch-linear layout (units of Element)
LongIndex stride_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : stride_(layout.stride(0)) {
inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) *
ThreadMap::Delta::kStrided * LongIndex(stride_) *
sizeof_bits<Element>::value / 8;
};
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Offset to the first steady-state tile
TensorCoord residue_offset_;
/// Initial offset to define ELL block
TensorCoord ell_offset_;
/// Used for out-of-order visitation
bool is_residue_tile_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
if (is_steady_state) {
if (kAdvanceRank == 0) {
guard = (coord.strided() < extent.strided());
} else {
guard = (coord.contiguous() < extent.contiguous());
}
} else {
guard = (coord.strided() < extent.strided() &&
coord.contiguous() < extent.contiguous());
}
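      // Pack the guard bit: predicates are stored four per byte and four bytes per
      // 32b word (see kPredicatesPerByte / kPredicatesPerWord above).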
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residue_tile_(true) {
TensorCoord residue_extent;
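    // Compute the size of the (possibly partial) residue tile along the advance
    // dimension; if the extent divides evenly, a full tile is treated as the residue.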
if (kAdvanceRank) {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided;
if (!residue_size) {
residue_size = Shape::kStrided;
}
residue_offset_ = make_Coord(0, residue_size);
residue_extent = make_Coord(
extent_.contiguous(),
min(threadblock_offset.strided() + residue_size, extent_.strided())
);
} else {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous;
if (!residue_size) {
residue_size = Shape::kContiguous;
}
residue_offset_ = make_Coord(residue_size, 0);
residue_extent = make_Coord(
min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size),
extent_.strided()
);
}
// Per-thread offset in logical coordinates of tensor
ell_offset_ = ThreadMap::initial_offset(thread_id);
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(residue_extent, false);
set_iteration_index(0);
}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
if (is_residue_tile_) {
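      // First advance after construction: fold the residue offset into the thread
      // offset, recompute predicates for the steady state, and advance one fewer
      // tile along the advance rank because the residue tile has already been visited.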
thread_offset_ += residue_offset_;
Layout layout(params_.stride_);
add_pointer_offset(layout(residue_offset_));
compute_predicates_(extent_, true);
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1);
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1);
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
}
}
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
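    // Offset the byte pointer by the contiguous delta for this iteration, then index
    // by the vector within the access (in AccessType units).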
return reinterpret_cast<AccessType *>(
pointer_ +
iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + iteration_vector_;
}
  /// Returns the k-dimension coordinate of the current access
CUTLASS_HOST_DEVICE
int get_k() const {
if(kAdvanceRank){ //strided
return ell_offset_.strided() + iteration_strided_ * ThreadMap::Delta::kStrided;
}else{
return ell_offset_.contiguous() + iteration_contiguous_ * ThreadMap::Delta::kContiguous + iteration_vector_ * AccessType::kElements;
}
}
CUTLASS_HOST_DEVICE
int get_stride() const {
if(kAdvanceRank)
return params_.stride_;
else
return 1;
}
  /// Advances the iterator and returns a reference to it.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
  /// Advances the iterator and returns a copy of its prior state.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
  /// Enables all accesses guarded by the predicate set
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
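    // Build a mask that only admits accesses whose per-thread ELL offset along the
    // non-advancing dimension falls below the ELL block size, then intersect it with
    // the existing boundary predicates and install it via set_mask().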
Mask mask;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = ell_offset_ + iteration_coord;
bool guard;
if (kAdvanceRank == 0) {
guard = (coord.strided() < blocksize);
} else {
guard = (coord.contiguous() < blocksize);
}
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
mask[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] &= predicates_[i];
}
set_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all accesses guarded by the predicate set
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::RowMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all accesses guarded by the predicate set
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for column-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class EllPredicatedTileAccessIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all accesses guarded by the predicate set
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for row-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class EllPredicatedTileAccessIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all accesses guarded by the predicate set
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 44,443 | C | 31.897113 | 138 | 0.658776 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of tiles
from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses. The first tile this
    iterator visits may be partial; the remaining tiles are complete. So, we
only need to compute the predicates twice, once before the first tile and
once for the remaining full tiles which can share the same predicates.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
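// Usage sketch (not part of the library): the concrete Shape, Element, and AccessType
// choices below are assumptions made purely for illustration, and ThreadMap is a
// placeholder for any valid pitch-linear thread map (e.g. one defined in
// cutlass/transform/pitch_linear_thread_map.h).
//
//   using Shape      = cutlass::layout::PitchLinearShape<64, 8>;
//   using Element    = float;
//   using AccessType = cutlass::Array<Element, 4>;
//   using Iterator   = cutlass::transform::threadblock::PredicatedTileAccessIterator<
//       Shape, Element, cutlass::layout::PitchLinear, /*AdvanceRank=*/1,
//       ThreadMap, AccessType>;
//
//   Iterator::Params params(layout);            // host-constructible precomputed state
//   Iterator it(params, ptr, extent, thread_id, tb_offset);
//   if (it.valid()) {                           // predicate guards out-of-bounds access
//     AccessType const *access = it.get();
//   }
//   ++it;                                       // advance to the next access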
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIteratorPredicates
///
template <typename Shape_, typename Element_, typename Layout_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIteratorPredicates {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = typename Layout::TensorCoord;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
  /// Number of bytes needed to hold the predicate bits (4 predicates per byte)
  static int const kPredicateByteCount =
      (kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
  /// Number of 32b words containing predicates
  static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
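  // Worked example of the packing above (numbers assumed purely for illustration):
  // with ThreadMap::Iterations::kCount = 8 and kAccessesPerVector = 3, kPredicateCount = 24,
  // so kPredicateByteCount = 6 and kPredicateWordCount = 2. Predicate i lives in word (i / 16),
  // byte ((i % 16) / 4), bit (i % 4); only the low 4 bits of each byte are used, which matches
  // the shift (byte_idx * 8 + bit_idx) applied in compute_predicates_() and tested in valid().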
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
// private:
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Offset to the first steady-state tile
TensorCoord residue_offset_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
if (is_steady_state) {
if (kAdvanceRank == 0) {
guard = (coord.strided() < extent.strided());
} else {
guard = (coord.contiguous() < extent.contiguous());
}
} else {
guard = (coord.strided() < extent.strided() &&
coord.contiguous() < extent.contiguous());
}
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
CUTLASS_HOST_DEVICE
void set_predicates(int thread_id, TensorCoord const &threadblock_offset) {
TensorCoord residue_extent;
if (kAdvanceRank) {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided;
if (!residue_size) {
residue_size = Shape::kStrided;
}
residue_offset_ = make_Coord(0, residue_size);
residue_extent = make_Coord(
extent_.contiguous(),
min(threadblock_offset.strided() + residue_size, extent_.strided())
);
} else {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous;
if (!residue_size) {
residue_size = Shape::kContiguous;
}
residue_offset_ = make_Coord(residue_size, 0);
residue_extent = make_Coord(
min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size),
extent_.strided()
);
}
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
compute_predicates_(residue_extent, false);
set_iteration_index(0);
}
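  // Numeric sketch of the residue computation above (values assumed for illustration):
  // advancing along the strided rank with extent_.strided() = 70, Shape::kStrided = 32, and
  // threadblock_offset.strided() = 0 gives residue_size = 70 % 32 = 6. The first (residue)
  // tile therefore covers strided indices [0, 6) and every later tile is a full 32-wide tile,
  // which is why predicates are computed here against residue_extent and recomputed only once
  // more for the steady state.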
/// Default constructor
PredicatedTileAccessIteratorPredicates() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorPredicates(
/// Extent of tensor
TensorCoord extent)
: extent_(extent) {
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
  /// Increment and return a reference to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorPredicates &operator++() {
return *this;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() const {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
};
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIterator
///
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap, typename AccessType, bool Gather = false>
class PredicatedTileAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, bool Gather>
class PredicatedTileAccessIterator<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, AccessType_, Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<
Shape, Element, Layout, AdvanceRank, ThreadMap, AccessType>;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = typename UnderlyingPredicates::Mask;
/// Uses a non-template class
struct Params : PredicatedTileAccessIteratorParams {
using Base = PredicatedTileAccessIteratorParams;
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) :
Base(layout.stride(0),
MakePredicatedTileAccessIteratorDesc<Shape, Element, Layout, kAdvanceRank, ThreadMap>()()
) { }
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
UnderlyingPredicates the_predicates;
/// Parameters object with precomputed internal state
Params params_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Used for out-of-order visitation
bool is_residue_tile_;
  /// The members below are used when Gather is turned on. We need to record strided_offset
  /// and contiguous_offset separately to compute the offset by using
///
/// offset = contiguous_offset + indices[strided_offset]
///
/// Gather indices
int const *indices_;
Index gather_offset_strided;
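  // For reference, the Gather address computation performed in get() below, in bytes:
  //   strided_index     = gather_offset_strided + iteration_strided_ * Delta::kStrided
  //   contiguous_offset = iteration_contiguous_ * Delta::kContiguous * sizeof_bits<Element> / 8
  //                         + iteration_vector_
  //   strided_offset    = indices_[strided_index] * stride_ * sizeof_bits<Element> / 8
  //   address           = pointer_ + contiguous_offset + strided_offset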
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
the_predicates.compute_predicates_(extent, is_steady_state);
}
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
/// Gather indices
int const *indices = nullptr)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
the_predicates(extent),
is_residue_tile_(true),
indices_(indices) {
the_predicates.set_predicates(thread_id, threadblock_offset);
// update internal pointers
Layout layout(params_.stride_);
if (!Gather) {
add_pointer_offset(layout(the_predicates.thread_offset_));
} else {
gather_offset_strided = the_predicates.thread_offset_.strided();
add_pointer_offset(layout(make_Coord(the_predicates.thread_offset_.contiguous(), 0)));
}
}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
the_predicates.set_iteration_index(index);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
if (is_residue_tile_) {
the_predicates.thread_offset_ += the_predicates.residue_offset_;
the_predicates.compute_predicates_(the_predicates.extent_, true);
Layout layout(params_.stride_);
if (!Gather) {
add_pointer_offset(layout(the_predicates.residue_offset_));
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1);
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1);
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
gather_offset_strided = the_predicates.thread_offset_.strided();
add_pointer_offset(layout(make_Coord(the_predicates.residue_offset_.contiguous(), 0)));
if (kAdvanceRank) {
gather_offset_strided += Shape::kStrided * (tile_offset.strided() - 1);
add_pointer_offset(Shape::kContiguous * tile_offset.contiguous());
} else {
add_pointer_offset(Shape::kContiguous * (tile_offset.contiguous() - 1));
gather_offset_strided += Shape::kStrided * tile_offset.strided();
}
}
} else {
if (!Gather) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
add_pointer_offset(Shape::kContiguous * tile_offset.contiguous());
gather_offset_strided += Shape::kStrided * tile_offset.strided();
}
}
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
if (Gather) {
assert(indices_);
if (!valid()) {
return nullptr;
}
LongIndex contiguous_offset = the_predicates.iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + the_predicates.iteration_vector_;
int strided_index = gather_offset_strided + the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided;
LongIndex strided_offset = indices_[strided_index] * LongIndex(params_.stride_) * sizeof_bits<Element>::value / 8;
return reinterpret_cast<AccessType *>(pointer_ + contiguous_offset + strided_offset);
}
return reinterpret_cast<AccessType *>(
pointer_ +
the_predicates.iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + the_predicates.iteration_vector_;
}
  /// Increment and return a reference to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) {
return *this;
}
the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;
if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
if (!Gather) {
pointer_ += params_.inc_strided_;
}
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
the_predicates.iteration_strided_ = 0;
if (!Gather) {
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
}
return *this;
}
  /// Post-increment: advances the iterator and returns a copy of its prior state.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
the_predicates.clear_mask(enable);
}
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
the_predicates.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
the_predicates.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
the_predicates.get_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() const {
return the_predicates.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, bool Gather>
class PredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_, Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType, Gather>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
      int const *indices = nullptr ///< Gather indices
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column()),
indices) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, bool Gather>
class PredicatedTileAccessIterator<Shape_, Element_, layout::RowMajor,
AdvanceRank, ThreadMap_, AccessType_, Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType, Gather>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
/// Gather indices
int const *indices = nullptr)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row()),
indices) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for affine rank 2 data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRankN<2>,
AdvanceRank, ThreadMap_, AccessType_, false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRankN<2>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<
Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap, AccessType>;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingPredicates::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIterator;
private:
/// stride of pitch-linear layout (units of Element)
Coord<Layout::kStrideRank, Layout::LongIndex> stride_;
/// amount (in byte) to increment pointer to move to next access along
/// contiguous dimension
LongIndex inc_contiguous_;
/// amount (in byte) to increment pointer from first access of current
/// contiguous dimension to first access of next one.
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access of current
/// contiguous dimension to first access of next one.
LongIndex inc_next_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
    Params(): stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_strided_(0), inc_next_(0), inc_advance_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : stride_({layout.stride(0), layout.stride(1)}) {
inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) *
sizeof_bits<Element>::value / 8;
inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
inc_next_strided_ = inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_;
};
};
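  // Numeric sketch of the increments computed above (all values assumed for illustration):
  // with Element = float (4 bytes), stride_ = {1, 128}, ThreadMap::Delta = {8, 4},
  // ThreadMap::Iterations = {4, 2}, Shape = {32, 8}, and kAdvanceRank = 1:
  //   inc_contiguous_   =   1 *  8 * 4 =   32 bytes
  //   inc_strided_      = 128 *  4 * 4 = 2048 bytes
  //   inc_next_strided_ = 2048 - 3 * 32 = 1952 bytes
  //   inc_advance_      =   8 * 128 * 4 = 4096 bytes
  //   inc_next_         = 4096 - 3 * 32 - 1 * 2048 = 1952 bytes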
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
//
// Data members
//
/// Parameters object with precomputed internal state
Params params_;
/// Internal pointer to first access of tile
BytePointer pointer_;
UnderlyingPredicates the_predicates;
/// Used for out-of-order visitation
bool is_residue_tile_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
the_predicates.compute_predicates_(extent, is_steady_state);
}
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
      int const *indices = nullptr ///< gather/scatter indices (not supported by this specialization)
)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
the_predicates(extent),
is_residue_tile_(true) {
the_predicates.set_predicates(thread_id, threadblock_offset);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(the_predicates.thread_offset_));
}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { the_predicates.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
if (is_residue_tile_) {
the_predicates.thread_offset_ += the_predicates.residue_offset_;
Layout layout(params_.stride_);
add_pointer_offset(layout(the_predicates.residue_offset_));
the_predicates.compute_predicates_(the_predicates.extent_, true);
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1] - 1);
pointer_ += Shape::kContiguous * tile_offset[0];
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0] - 1);
pointer_ += Shape::kStrided * tile_offset[1];
}
} else {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]);
pointer_ += Shape::kContiguous * tile_offset[0];
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]);
pointer_ += Shape::kStrided * tile_offset[1];
}
}
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(pointer_) + the_predicates.iteration_vector_;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) {
return *this;
}
the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;
if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
pointer_ += params_.inc_contiguous_;
return *this;
}
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_next_strided_;
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
the_predicates.iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { the_predicates.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { the_predicates.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { the_predicates.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return the_predicates.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for affine rank 2 column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRank2ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_, false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::AffineRankN<2>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){};
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
      int const *indices = nullptr ///< gather/scatter indices (not supported by this specialization)
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset(make_Coord(tile_offset.row(), tile_offset.column()));
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for affine rank-2 row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRank2RowMajor,
AdvanceRank, ThreadMap_, AccessType_, false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::AffineRankN<2>, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset,
      int const *indices = nullptr ///< gather/scatter indices (not supported by this specialization)
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset(make_Coord(tile_offset.column(), tile_offset.row()));
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for column-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
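/// Coordinate mapping sketch (mirrors the constructor below): an extent of (rows, columns)
/// in ColumnMajorInterleaved<k> is presented to the underlying pitch-linear iterator as
/// (rows * k, columns / k), and a threadblock offset (r, c) maps to (r * k, c / k). For
/// example (values assumed for illustration), with InterleavedK = 32 an extent of
/// (128, 64) becomes the pitch-linear extent (4096, 2).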
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class PredicatedTileAccessIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_, false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
      int const *indices = nullptr ///< gather/scatter indices (not supported by this specialization)
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for row-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class PredicatedTileAccessIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_, false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileAccessIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
      int const *indices = nullptr ///< gather/scatter indices (not supported by this specialization)
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator operator++(int) {
PredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 70,684 | C | 32.885427 | 176 | 0.668694 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief ELL iterator for a Blocked-ELL matrix (ellValue matrix) used with EllMmaPipelined
*/
#pragma once
#include "cutlass/arch/memory.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
#include "cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h"
#include "cutlass/transform/threadblock/ell_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// EllPredicatedTileIterator
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//   Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::EllPredicatedTileIterator;
//
// typename Iterator::Params params(view.layout());
//
//   kernel<Iterator><<<grid, block>>>(params, view.data(), view.extent());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int AccessSize = ThreadMap::kElementsPerAccess
>
class EllPredicatedTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize>
class EllPredicatedTileIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank,
ThreadMap_, AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
using AccessType = AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
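  /// Note (illustrative arithmetic, not from the source): the third template argument is the
  /// access alignment in bytes. For example, with a 16-bit Element and AccessSize = 8 the
  /// access is 8 * 16 / 8 = 16 bytes, i.e. a 128-bit vectorized load/store.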
/// Underlying iterator to compute the addresses
using TileAccessIterator =
EllPredicatedTileAccessIterator<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType>;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend EllPredicatedTileIterator;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return address_iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { address_iterator_.ell_add_mask(blocksize); }
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, address_iterator_.valid());
++address_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_byte_offset(frag, 0); }
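  /// Loads a fragment using per-access offsets obtained from the Blocked-ELL index iterator.
  /// Each access queries ell_iter.get_offset() with the address iterator's current k coordinate,
  /// converts the result to a byte offset, and issues a predicated global load.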
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator &ell_iter) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
LongIndex ell_offset = 0;
int k_offset = address_iterator_.get_k();
ell_offset = ell_iter.get_offset(k_offset) * sizeof(Element);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + ell_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
bool is_valid = address_iterator_.valid();
is_valid = is_valid && (ell_offset >= 0);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, is_valid);
++address_iterator_;
}
}
}
}
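  /// Fast-path variant of the ELL-indexed load: a single offset is obtained once per fragment
  /// via ell_iter.get_offset_fast() and applied uniformly to every access, instead of computing
  /// a separate offset per access as in load_with_ell_index().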
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator &ell_iter) {
LongIndex ell_offset = ell_iter.get_offset_fast() * sizeof(Element);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + ell_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
bool is_valid = address_iterator_.valid();
is_valid = is_valid && (ell_offset >= 0);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, is_valid);
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
if (address_iterator_.valid()) {
*access_ptr = frag_ptr[idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_byte_offset(frag, 0); }
};
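// Minimal usage sketch for the iterator above (illustrative only: `Iterator`, `iter`,
// `ell_iter`, `frag`, and `blocksize` are hypothetical names, and construction of the ELL
// index iterator is elided since its interface is defined in ell_iterator.h):
//
//   typename Iterator::Fragment frag;
//
//   iter.ell_add_mask(blocksize);              // guard accesses for small ELL blocks
//   iter.load_with_ell_index(frag, ell_iter);  // gather using per-access ELL offsets
//   ++iter;                                    // advance to the next tile
//
// load_with_ell_index_fast() is the variant that applies one offset, obtained from
// ell_iter.get_offset_fast(), uniformly to the whole fragment.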
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize
>
class EllPredicatedTileIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): EllPredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize
>
class EllPredicatedTileIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
    }
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
/// Construct a EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): EllPredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for column-major interleaved data. It is mapped
/// to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, int InterleavedK>
class EllPredicatedTileIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
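  // Illustrative arithmetic (values are assumptions for the example only): with
  // kInterleavedK = 32, a column-major-interleaved extent of (row = 128, column = 64) is
  // presented to the underlying pitch-linear iterator as
  // (contiguous = 128 * 32 = 4096, strided = 64 / 32 = 2).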
/// Construct a EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for row-major interleaved data. It is
/// mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, int InterleavedK>
class EllPredicatedTileIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessSize>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 44,309 | C | 32.670213 | 109 | 0.661107 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates for computing the addresses at which small scale and bias
      vectors are stored in shared memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// RegularScaleBiasVectorAccessIterator
///
template <typename Shape, typename Element, typename Layout>
class RegularScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_>
class RegularScaleBiasVectorAccessIterator<Shape_, Element_, layout::PitchLinear> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
/// Element type per access
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = Shape::kContiguous / kElementsPerAccess;
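  // Illustrative arithmetic (Element and Shape values are assumptions for the example only):
  // for a 16-bit Element, kElementsPerAccess = 128 / 16 = 8 elements per 128-bit access;
  // with Shape::kContiguous = 64, kThreads = 64 / 8 = 8 participating threads.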
using AccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator(
TensorRef scale_bias_ref, ///< Pointer to the start of the scale and bias
///< vector
int thread_id ///< ID of each participating thread
)
: byte_offset_(0) {
// Per-thread offset in logical coordinates of tensor
int thread_offset = thread_id * kElementsPerAccess;
// initialize pointer
pointer_ =
reinterpret_cast<AccessType *>(scale_bias_ref.data() + thread_offset);
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
char *access_byte_ptr =
reinterpret_cast<char *>(pointer_);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator &operator++() { return *this; }
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator operator++(int) {
RegularScaleBiasVectorAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset in the unit of tile.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
    // Multiply by 2 because the scale and bias vectors of the same stage are stored
    // next to each other.
add_pointer_offset(coord.contiguous() * Shape::kContiguous * 2);
}
};
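// Minimal usage sketch (illustrative only; `smem_scale_bias_ref`, `thread_id`, and the Shape /
// Element parameters are hypothetical names, not taken from the source):
//
//   RegularScaleBiasVectorAccessIterator<Shape, Element, layout::PitchLinear>
//       iter(smem_scale_bias_ref, thread_id);
//
//   auto *ptr = iter.get();        // this thread's vectorized slot in the current vector
//   iter.add_tile_offset({1, 0});  // skip one stage's scale vector and bias vector
//                                  // (hence the factor of 2 inside add_tile_offset)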
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_>
class RegularScaleBiasVectorAccessIterator<
Shape_, Element_,
layout::RowMajor> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
/// Underlying iterator type
using UnderlyingIterator = RegularScaleBiasVectorAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator(
TensorRef scale_bias_ref, ///< Pointer to the start of the scale and bias
///< vector
int thread_id ///< ID of each participating thread
)
: iterator_({scale_bias_ref.data(), scale_bias_ref.stride()}, thread_id) {
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator operator++(int) {
RegularScaleBiasVectorAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 8,232 | C | 31.413386 | 100 | 0.657191 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
    These regular tile iterators address shared memory without predication. Increments are
    precomputed when the iterator is constructed, so advancing between accesses and between
    tiles reduces to integer additions on an internal pointer.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "regular_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for pitch-linear. This one is used by 2-stage SIMT kernels
/// and sparse tensor core meta data.
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess, kAlignment>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the contiguous or strided dimensions.");
private:
//
// Types
//
//
// Data members
//
/// Pointer to memory
uint8_t *pointer_;
/// Stride quantity
StrideIndex stride_;
/// Amount to increment pointer along strided dimension
Index increment_strided_;
/// Amount to advance pointer between tiles
Index increment_advance_;
public:
CUTLASS_DEVICE
RegularTileIterator(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { }
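  /// Constructs an iterator from a TensorRef and thread index. The byte increments are
  /// precomputed here: increment_strided_ steps between the thread map's strided iterations,
  /// and increment_advance_ advances one full tile along the advance rank.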
CUTLASS_DEVICE
RegularTileIterator(
TensorRef const &ref,
int thread_idx
):
pointer_(reinterpret_cast<uint8_t *>(ref.data()) + (ref.offset(ThreadMap::initial_offset(thread_idx)) * sizeof_bits<Element>::value / 8)) {
stride_ = ref.stride()[0];
increment_strided_ = (ref.stride()[0] * sizeof_bits<Element>::value) * ThreadMap::Delta::kStrided / 8;
increment_advance_ =
(kAdvanceRank == 0 ?
Shape::kContiguous * sizeof_bits<Element>::value / 8 :
Shape::kStrided * (ref.stride()[0] * sizeof_bits<Element>::value / 8));
}
/// Loads a fragment
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
load_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag);
uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
access_ptr[c * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess] = frag_ptr[idx];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
store_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
pointer_ += increment_advance_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator--() {
pointer_ -= increment_advance_;
return *this;
}
  /// Adds a pointer offset; in this specialization the offset is applied directly to the
  /// internal byte pointer (add_tile_offset() below passes an offset precomputed in bytes)
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset;
}
  /// Adds a tile offset in units of whole tiles.
  /// In GEMM/Conv implementations, this is used to advance along the k dimension in shared memory.
  /// The layouts below are the shared memory layouts; current SM50 SIMT kernels use only
  /// column-major A and row-major B.
  /// For a row-major A operand, the k dimension is the contiguous dimension;
  /// for a column-major A operand, the k dimension is the strided dimension;
  /// for a row-major B operand, the k dimension is the strided dimension;
  /// for a column-major B operand, the k dimension is the contiguous dimension.
  /// The two wrapper classes below map column-/row-major coordinates to the pitch-linear
  /// coordinates used by this class.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
int offset = sizeof_bits<Element>::value *
(coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8;
add_pointer_offset(offset);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
#if 0
AccessType *access_ptr = pointer_[iteration_strided_ & 1];
int stride_idx = (iteration_strided_ & ~1);
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
#endif
return reinterpret_cast<AccessType *>(pointer_);
}
};
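/// Usage sketch (illustrative only, not part of the library API). Round-trips one tile through
/// the iterator; `Iterator` stands for the pitch-linear specialization above with a caller-chosen
/// ThreadMap, and the function name is hypothetical.
///
// template <typename Iterator>
// CUTLASS_DEVICE
// void copy_tile(
//   typename Iterator::TensorRef dst_ref,
//   typename Iterator::TensorRef src_ref,
//   int thread_idx) {
//
//   Iterator src_iter(src_ref, thread_idx);
//   Iterator dst_iter(dst_ref, thread_idx);
//
//   typename Iterator::Fragment frag;
//
//   src_iter.load(frag);    // each thread gathers its ThreadMap-assigned accesses
//   dst_iter.store(frag);   // and writes them back in the same arrangement
//
//   ++src_iter;             // advance both iterators one whole tile along kAdvanceRank
//   ++dst_iter;
// }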
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for row-major layouts
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
using Underlying = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
kAlignment
>;
using AccessType = typename Underlying::AccessType;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator() { }
CUTLASS_DEVICE
RegularTileIterator(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
    iterator_.load(frag, {tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
    iterator_.store(frag, {tile_offset.column(), tile_offset.row()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return iterator_.get();
}
};
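/// Illustrative note (not part of the API): this row-major wrapper only swaps coordinates before
/// delegating to the pitch-linear iterator, since the column index of a row-major matrix is its
/// contiguous dimension. A hypothetical helper would express the mapping as:
///
// CUTLASS_HOST_DEVICE
// layout::PitchLinearCoord to_pitch_linear(MatrixCoord const &coord) {
//   return layout::PitchLinearCoord(coord.column(), coord.row());
// }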
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for column-major layouts
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
using Underlying = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
    ThreadMap,
    kAlignment
>;
using AccessType = typename Underlying::AccessType;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator() { }
CUTLASS_DEVICE
RegularTileIterator(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
    iterator_.load(frag, {tile_offset.row(), tile_offset.column()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
    iterator_.store(frag, {tile_offset.row(), tile_offset.column()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return iterator_.get();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 16,510 | C | 28.857143 | 143 | 0.664506 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates computing the addresses and predicates for loading scale and bias vectors.
    This iterator uses masks to guard out-of-bounds accesses.
    It can be used to load the mean and variance vectors in layernorm, which are loop invariant.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorIterator
///
template <typename WarpShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorIterator for pitch-linear data.
///
template <typename WarpShape_, typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
Element_,
layout::PitchLinear> {
public:
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 1;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
static int const kIterations = WarpShape::kContiguous / 8;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<__half2, 2 * kIterations * kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer to first access of tile
ConstPointer scale_pointer_;
ConstPointer bias_pointer_;
/// Size of tensor
int problem_size_;
int32_t thread_offset_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
/// Extent of tensor
int problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: problem_size_(problem_size),
scale_pointer_(scale_pointer),
bias_pointer_(bias_pointer) {
thread_offset_ = threadblock_offset.contiguous() + (thread_id % 32) / 4;
}
  /// Construct a PredicatedScaleBiasVectorIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
/// Extent of tensor
int problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorIterator(problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Advances an iterator along logical dimensions of matrix in units of whole warp tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ += (WarpShape::kContiguous * tile_offset.contiguous());
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.fill(__float2half2_rn(0.0f));
__half2 *frag_ptr = reinterpret_cast<__half2 *>(&frag);
// load scale
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
__half,
sizeof(AccessType)
>(
frag_ptr[c * 2].x,
scale_pointer_ + thread_offset_ + c * 8,
(thread_offset_ + c * 8) < problem_size_
);
}
// load bias
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
__half,
sizeof(AccessType)
>(
frag_ptr[c * 2 + 1].x,
bias_pointer_ + thread_offset_ + c * 8,
(thread_offset_ + c * 8) < problem_size_
);
}
// duplicate scale
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
frag_ptr[c * 2].y = frag_ptr[c * 2].x;
}
// duplicate bias
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
frag_ptr[c * 2 + 1].y = frag_ptr[c * 2 + 1].x;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
};
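/// Usage sketch (illustrative only; the warp shape, element type, and function name are
/// hypothetical). The loop-invariant scale/bias vectors are loaded once; each fragment entry is
/// an __half2 whose two lanes duplicate the loaded value, and out-of-range accesses are
/// zero-filled by the predicated global loads.
///
// using Iterator = PredicatedScaleBiasVectorIterator<
//     layout::PitchLinearShape<64, 8>, cutlass::half_t, layout::PitchLinear>;
//
// CUTLASS_DEVICE
// void load_scale_bias(
//   cutlass::half_t const *scale,
//   cutlass::half_t const *bias,
//   int problem_size,
//   int thread_idx) {
//
//   Iterator iter(problem_size, scale, bias, thread_idx);
//
//   typename Iterator::Fragment frag;   // interleaved {scale, bias} __half2 pairs
//   iter.load(frag);
// }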
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename WarpShape_,
typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
Element_,
layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorIterator<
layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
using Fragment = typename UnderlyingIterator::Fragment;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
///< Extent of tensor
int problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
  /// Construct a PredicatedScaleBiasVectorIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
int problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorIterator(problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
iterator_.load(frag);
}
};
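/// Usage note (illustrative; names are hypothetical). In a mainloop walking warp tiles of a
/// row-major operand, the iterator is advanced in units of whole warp tiles before each reload;
/// the row-major coordinate is translated to pitch-linear internally.
///
// iter.add_tile_offset({0, 1});   // step one WarpShape::kColumn along the contiguous dimension
// iter.load(frag);                // reload scale/bias for the new warp tile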
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 11,097 | C | 32.732523 | 100 | 0.632964 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h"
#include "cutlass/transform/thread/transpose.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileIterator2dThreadTile
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//     Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::PredicatedTileIterator2dThreadTile;
//
// typename Iterator::Params params(view.layout());
//
// kernel<Iterator>(params, view.data());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
bool Transpose = false
>
class PredicatedTileIterator2dThreadTile;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, bool Transpose_>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
  /// (an extra set of parentheses is needed for the VS compiler)
struct alignas((ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value /
8)) AccessType {
Array<Element, ThreadMap::kElementsPerAccess> storage;
static int const kElements = ThreadMap::kElementsPerAccess;
};
  /// Optionally, this fragment can be transposed in 4x4 blocks
using Transform = thread::Transpose< ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount , layout::PitchLinearShape<4,4>, Element>;
static bool const transpose = Transpose_;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
PredicatedTileAccessIterator2dThreadTile<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType>;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
using Base = typename TileAccessIterator::Params::Base;
friend PredicatedTileIterator2dThreadTile;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Base const &base)
: params_(base) {}
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
/// Precomputed parameters object
      Params const &params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
      int const *indices = nullptr  ///< gather/scatter indices (not supported by this specialization)
)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
      Params const &params,            ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){
int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \
s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
address_iterator_.set_iteration_index(access_idx);
if (address_iterator_.valid()) {
frag_ptr[access_idx] =
*(address_iterator_.get() + pointer_offset);
}
++address_iterator_;
}
}
}
if (transpose) {
Transform t;
t.transform(frag, frag);
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){
int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \
s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
address_iterator_.set_iteration_index(access_idx);
if (address_iterator_.valid()) {
*(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
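/// Instantiation sketch (illustrative only; `ThreadMap_` stands for any 2-D thread-tile thread
/// map providing Iterations and ThreadAccessShape, and the tile shape is arbitrary). Passing
/// Transpose = true makes every load() additionally transpose its fragment in 4x4 blocks through
/// the Transform functor.
///
// using Iterator = PredicatedTileIterator2dThreadTile<
//     layout::PitchLinearShape<64, 32>,   // threadblock tile shape
//     int8_t,                             // element type
//     layout::PitchLinear,                // layout of the source tensor
//     1,                                  // advance along the strided dimension
//     ThreadMap_,                         // hypothetical 2-D thread-tile thread map
//     true>;                              // transpose fragments in 4x4 blocks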
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
bool Transpose_
>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static bool const Transpose = Transpose_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
Transpose
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
    Params const &params,                         ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
    int const *indices = nullptr                  ///< gather/scatter indices (not supported by this specialization)
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
    Params const &params,                         ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
bool Transpose_
>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static bool const Transpose = Transpose_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
Transpose
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { }
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
    Params const &params,                         ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
    int const *indices = nullptr                  ///< gather/scatter indices (not supported by this specialization)
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
    Params const &params,                         ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 27,175 | C | 33.48731 | 150 | 0.670948 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates implementing loading and storing of tiles to/from pitch-linear rank=2
    tensors arranged in the Volta Tensor Op multiplicand layouts in shared memory.
    These regular tile iterators do not require predication; a small amount of precomputed
    state and integer addition are used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in length.
static int const kAccessSizeInBits = 128;
static_assert(
sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
///< Number of pointers
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType * pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific pointer
// (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base + layout::PitchLinearCoord{0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
add_pointer_offset((kAdvanceRank ? Shape::kStrided * stride_ * Layout::kElementsPerAccess : Shape::kContiguous));
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess
);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[s & 1];
int stride_idx = (s & ~1);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess +
vec_pointer_offset;
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char const *access_byte_ptr = reinterpret_cast<char const *>(access_ptr + access_offset);
frag_ptr[access_idx] = *reinterpret_cast<AccessType const *>(access_byte_ptr + byte_offset_);
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[s & 1];
int stride_idx = (s & ~1);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess +
vec_pointer_offset;
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset);
*reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_) = frag_ptr[access_idx];
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
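/// Sizing note (illustrative): the 128-bit access constraint above fixes the thread map's
/// kElementsPerAccess per element width, e.g. eight elements for 16-bit types and four for
/// 32-bit types. Expressed as a (hypothetical) compile-time check:
///
// static_assert(128 / sizeof_bits<cutlass::half_t>::value == 8,
//               "eight 16-bit elements form one 128-bit access");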
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::RowMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
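// The column-major and row-major specializations in this file are thin
// adapters over the pitch-linear iterator: they only reorder coordinates when
// delegating. For example, the row-major adapter maps a TensorCoord
// (row, column) onto the pitch-linear (contiguous, strided) pair as
// (column, row), while the column-major adapter forwards (row, column)
// unchanged.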
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in length.
static int const kAccessSizeInBits = 128;
static_assert(
sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
///< Number of pointers
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
};
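// Worked example of the 128-bit constraint above (the values are illustrative,
// not mandated by this header): with 16-bit elements, a ThreadMap using
// kElementsPerAccess == 8 satisfies 16 * 8 == 128; with 32-bit elements the
// corresponding value would be 4.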
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType * pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific pointer
// (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base + layout::PitchLinearCoord{0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
add_pointer_offset((kAdvanceRank ? Shape::kStrided * stride_ * Layout::kElementsPerAccess : Shape::kContiguous));
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess
);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[s & 1];
int stride_idx = (s & ~1);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess +
vec_pointer_offset;
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char const *access_byte_ptr = reinterpret_cast<char const *>(access_ptr + access_offset);
frag_ptr[access_idx] = *reinterpret_cast<AccessType const *>(access_byte_ptr + byte_offset_);
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[s & 1];
int stride_idx = (s & ~1);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess +
vec_pointer_offset;
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset);
*reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_) = frag_ptr[access_idx];
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
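// Minimal usage sketch for the iterator above (illustrative only: the tile
// shape, element type, and "SomeThreadMap" are hypothetical and assumed to be
// consistent with the 128-bit access constraint):
//
//   using Iterator = RegularTileIterator<
//       layout::PitchLinearShape<64, 8>,
//       half_t,
//       layout::VoltaTensorOpMultiplicandBCongruous<16>,
//       1,                 // advance along the strided rank
//       SomeThreadMap>;
//
//   Iterator iter(smem_ref, lane_id);
//   typename Iterator::Fragment frag;
//   iter.load(frag);       // read this thread's portion of the current tile
//   ++iter;                // advance to the next tile
//   iter.store(frag);      // write the fragment into the new tile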
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::ColumnMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::RowMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/// Tile iterator specialized for crosswise arrangements for TensorOps.
///
/// The Volta TN SMEM layout is a little different:
/// crosswise elements are stored contiguously along a line, while contiguous
/// elements are stored line by line.
/// Padding is used to reduce SMEM bank conflicts.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_, Element_,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Shape_::kContiguous>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Shape::kContiguous>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
///< Number of pointers
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
/// Number of Layout-sized accesses needed to cover ThreadMap::kElementsPerAccess
static int const kIterarionsPerAccess =
ThreadMap::kElementsPerAccess / Layout::kElementsPerAccess;
/// Contiguous elements per line
static int const kContiguousElementsPerLine = 4;
};
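// Illustrative arithmetic (the concrete numbers are hypothetical): if
// ThreadMap::kElementsPerAccess were 8 and Layout::kElementsPerAccess were 4,
// kIterarionsPerAccess would be 2, i.e. each ThreadMap access is issued as two
// Layout-sized accesses on consecutive lines (see the i-loop in
// load_with_pointer_offset / store_with_pointer_offset below).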
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment =
Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// The crosswise elements are stored along a line.
/// line_size is the size of the crosswise dimension plus padding,
/// in units of AccessType.
Index line_size;
/// Internal pointer to first access of tile
AccessType *pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: line_size(ref.stride(0) * Detail::kContiguousElementsPerLine / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base =
ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base +
layout::PitchLinearCoord{
0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(
ref.data() + ref.offset(thread_offset_in_threadblock_tile));
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
// (Shape::kContiguous/Layout::kElementsPerAccess)*
// line_size * Layout::kElementsPerAccess
add_pointer_offset(Shape::kContiguous * line_size);
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset((coord.contiguous() * (Shape::kContiguous / Layout::kElementsPerAccess) *
line_size + coord.strided() * Shape::kStrided) *
Layout::kElementsPerAccess);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
Index vec_pointer_offset = pointer_offset / Layout::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[(s & 1) ^ (s / 2)];
access_ptr += 16 * (s / 2);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < Detail::kIterarionsPerAccess; ++i) {
int access_offset =
c * ThreadMap::Delta::kContiguous / Detail::kContiguousElementsPerLine * line_size +
vec_pointer_offset + i * line_size;
int access_idx = (c + s * ThreadMap::Iterations::kContiguous) *
Detail::kIterarionsPerAccess + i;
char const *access_byte_ptr = reinterpret_cast<char const*>(access_ptr + access_offset);
frag_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
access_byte_ptr + byte_offset_);
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
Index vec_pointer_offset = pointer_offset / Layout::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[(s & 1) ^ ((s >> 1) & 1)];
access_ptr += 16 * (s / 2) + vec_pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < Detail::kIterarionsPerAccess; ++i) {
int access_offset =
c * ThreadMap::Delta::kContiguous / Detail::kContiguousElementsPerLine * line_size + i * line_size;
int access_idx = (c + s * ThreadMap::Iterations::kContiguous) *
Detail::kIterarionsPerAccess + i;
char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset);
*reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_) =
frag_ptr[access_idx];
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
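// Note on the iterator above: operator++ advances the pointer by
// Shape::kContiguous * line_size elements regardless of kAdvanceRank, i.e.
// Shape::kContiguous / Layout::kElementsPerAccess lines of line_size
// AccessType units each, which steps over exactly one tile. line_size already
// accounts for the padding used to reduce shared-memory bank conflicts.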
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_,
layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Shape_::kRow>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Shape::kRow>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Shape::kRow>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_,
layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Shape_::kColumn>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Shape::kColumn>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Shape::kColumn>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 43,663 | C | 28.886379 | 117 | 0.665048 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/vector_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template wrapping the vector access iterator concept to load a whole vector from tensors in
memory. This is typically used for per-channel scale and bias in convolution kernels.
*/
#pragma once
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename VectorAccessIterator_>
class VectorIterator {
public:
using VectorAccessIterator = VectorAccessIterator_;
using Shape = typename VectorAccessIterator::Shape;
using Element = typename VectorAccessIterator::Element;
using Layout = typename VectorAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using AccessType = typename VectorAccessIterator::AccessType;
using TensorRef = typename VectorAccessIterator::TensorRef;
using Index = typename VectorAccessIterator::Index;
using LongIndex = typename VectorAccessIterator::LongIndex;
static int const kElementsPerAccess = VectorAccessIterator::kElementsPerAccess;
static int const kRowsPerIteration = VectorAccessIterator::kRowsPerIteration;
static int const kThreads = VectorAccessIterator::kThreads;
static int const kIterations = VectorAccessIterator::kIterations;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element, kElementsPerAccess * kIterations>;
private:
/// Internal state
VectorAccessIterator vector_access_iterator_;
public:
/// Constructor
CUTLASS_HOST_DEVICE
VectorIterator(
Element const *ptr,
TensorCoord extent,
int thread_idx,
int warp_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
vector_access_iterator_(ptr, extent, thread_idx, warp_idx, threadblock_offset) { }
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
VectorIterator &operator++() {
vector_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
VectorIterator operator++(int) {
VectorIterator self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[c],
vector_access_iterator_.get() + pointer_offset,
vector_access_iterator_.valid()
);
++vector_access_iterator_;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
vector_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
vector_access_iterator_.advance();
}
};
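// Minimal usage sketch (illustrative; the element pointer, extent, and
// thread/warp indices are assumptions supplied by the surrounding kernel, and
// "SomeVectorAccessIterator" stands in for a concrete access iterator type):
//
//   using ScaleBiasIterator = VectorIterator<SomeVectorAccessIterator>;
//
//   ScaleBiasIterator scale_iter(scale_ptr, extent, thread_idx, warp_idx,
//                                threadblock_offset);
//   typename ScaleBiasIterator::Fragment scale_frag;
//   scale_iter.load(scale_frag);  // predicated loads; lanes whose predicate is
//                                 // false keep the cleared (zero) value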
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 5,226 | C | 33.846666 | 100 | 0.649445 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing computing the addresses of storing of tiles
from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
///< Number of pointers
static int const kPointerCount =
(ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
};
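// How the two pointers are used (when ThreadMap::Iterations::kStrided > 1;
// otherwise a single pointer suffices): pointer_[0] and pointer_[1] hold this
// thread's base addresses for even and odd strided iterations, offset from
// each other by WarpThreadArrangement::kStrided rows. get() selects between
// them with (iteration_strided_ & 1) and applies the remaining strided offset
// via stride_idx = (iteration_strided_ & ~1).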
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base =
ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base +
layout::PitchLinearCoord{
0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(
ref.data() + ref.offset(thread_offset_in_threadblock_tile));
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_[iteration_strided_ & 1];
int stride_idx = (iteration_strided_ & ~1);
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_strided_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ *
Layout::kElementsPerAccess);
}
};
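// Typical driving pattern for the access iterator above (a sketch only; the
// staged data pointer and the tile-offset step are assumptions of the caller,
// not defined in this header):
//
//   smem_iter.set_iteration_index(0);
//   CUTLASS_PRAGMA_UNROLL
//   for (int i = 0; i < ThreadMap::Iterations::kCount; ++i) {
//     *smem_iter.get() = staged_frag_ptr[i];  // one 128-bit access per step
//     ++smem_iter;
//   }
//   smem_iter.add_tile_offset({0, 1});        // then move to the next tile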
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<Shape_, Element_,
layout::TensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
static int const kCrosswise = Crosswise;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(!(ThreadMap::Delta::kContiguous % kCrosswise),
"kCrosswise is the smallest unit in the contiguous dimension "
"for shared memory swizzling.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
/// Number of pointers
///
/// Note: TN kblock32 layouts need only one pointer, but, strangely,
/// reducing the pointer count hurts performance.
static int const kPointerCount =
(ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Total number of sections. The memory is divided into stages; one stage
/// can store one tile. A stage is divided into sections. Interleaved layouts
/// can have multiple sections per stage, while other layouts have only one
/// section per stage.
int sections_;
/// Sections that a stage has
int sections_per_stage_;
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: sections_(ref.stride(0) / kCrosswise),
sections_per_stage_(Shape::kContiguous / kCrosswise),
// stride_ = kCrosswise x sections_ x kFactor
stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base =
ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base +
layout::PitchLinearCoord{
0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(ref.data()) +
ref.offset(thread_offset_in_threadblock_tile) /
Layout::kElementsPerAccess;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_[iteration_strided_ & 1];
int stride_idx = (iteration_strided_ & ~1);
int access_offset =
stride_idx * ThreadMap::Delta::kStrided * stride_ / Layout::kFactor +
// kCrosswise elements in the contiguous dimension would span to a
// shared memory cache line.
iteration_contiguous_ * (ThreadMap::Delta::kContiguous / kCrosswise) *
Layout::TileShape::kContiguous;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_strided_ == ThreadMap::Iteration::kStrided)
// which means we enter the next section.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * sections_per_stage_ * stride_ *
ThreadMap::kElementsPerAccess / sections_ +
coord.strided() * Shape::kStrided * stride_ *
Layout::kElementsPerAccess / Layout::kFactor);
}
};
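// Illustrative arithmetic for the members above (hypothetical values): with
// ref.stride(0) == 64 and kCrosswise == 32, sections_ == 2; with
// Shape::kContiguous == 64, sections_per_stage_ == 2 as well, so each stage of
// the shared-memory buffer holds two crosswise sections.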
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 27,922 | C | 33.010962 | 100 | 0.651243 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing storing of tiles from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int Alignment = sizeof_bits<Element>::value * ThreadMap::kElementsPerAccess / 8
>
class RegularTileIterator;
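//
// The default Alignment is the size in bytes of one vectorized access. For example
// (illustrative): with Element = float (32 bits) and a thread map providing
// kElementsPerAccess = 4, the default evaluates to 32 * 4 / 8 = 16 bytes, i.e. a
// 128-bit access.
//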
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 2,616 | C | 40.539682 | 100 | 0.637615 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses. The first tile this
    iterator visits may be partial, while the remaining tiles are complete. So, we
only need to compute the predicates twice, once before the first tile and
once for the remaining full tiles which can share the same predicates.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/arch/memory.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileIterator
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//     Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::PredicatedTileIterator;
//
// typename Iterator::Params params(view.layout());
//
//   kernel<Iterator><<<grid, block>>>(params, view.data(), view.extent());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int AccessSize = ThreadMap::kElementsPerAccess,
bool Gather = false
>
class PredicatedTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, bool Gather>
class PredicatedTileIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank,
ThreadMap_, AccessSize, Gather> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
using AccessType = AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
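  // For example (illustrative): with Element = half_t (16 bits) and AccessSize = 8,
  // AccessType is AlignedArray<half_t, 8, 16>, i.e. a single 16-byte (128-bit) vector.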
/// Underlying iterator to compute the addresses
using TileAccessIterator =
PredicatedTileAccessIterator<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType, Gather>;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
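  // kAccessesPerVector is the number of physical vector accesses that make up one
  // logical thread-map access (as computed by the underlying access iterator); it
  // is 1 when ThreadMap::kElementsPerAccess equals AccessSize.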
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
using Base = typename TileAccessIterator::Params::Base;
friend PredicatedTileIterator;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) {}
/// Default constructor
Params() = default;
CUTLASS_HOST_DEVICE
Params(Base const &base)
: params_(base) {}
};
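  //
  // Host-side construction sketch (illustrative; `Iterator` is a concrete
  // PredicatedTileIterator instantiation and `ldm`, the tensor's leading dimension,
  // is assumed to be supplied by the caller):
  //
  //   typename Iterator::Params params(layout::PitchLinear(ldm));
  //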
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Default constructor
PredicatedTileIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
/// Gather indices
int const *indices = nullptr)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset, indices) {}
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator operator++(int) {
PredicatedTileIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
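    // Iteration order: the strided dimension is outermost, the contiguous dimension
    // next, and individual vector accesses innermost; idx linearizes (s, c, v) so
    // fragment elements are written in the same order the access iterator visits
    // addresses.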
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, address_iterator_.valid());
++address_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_byte_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
if (address_iterator_.valid()) {
*access_ptr = frag_ptr[idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_byte_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize,
bool Gather
>
class PredicatedTileIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessSize, Gather> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessSize,
Gather
>;
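  // Column-major coordinates map directly onto pitch-linear ones: rows form the
  // contiguous dimension and columns the strided dimension, so extents and offsets
  // are forwarded below as (row, column) without reordering.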
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0)))
{}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
    int const *indices = nullptr               ///< Gather/scatter indices forwarded to the underlying pitch-linear iterator
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()),
indices)
{ }
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator operator++(int) {
PredicatedTileIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize,
bool Gather
>
class PredicatedTileIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessSize, Gather> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessSize,
Gather
>;
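  // Row-major is the transposed mapping: columns form the contiguous dimension and
  // rows the strided dimension, so extents and offsets are forwarded below as
  // (column, row).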
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< Gather indices
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()),
indices
) { }
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator operator++(int) {
PredicatedTileIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for affine rank-2 data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize>
class PredicatedTileIterator<Shape_, Element_, layout::AffineRankN<2>, AdvanceRank,
ThreadMap_, AccessSize, false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRankN<2>;
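  // Unlike layout::PitchLinear, AffineRankN<2> carries an independent run-time
  // stride for each rank, so neither dimension is assumed to be unit-stride when
  // addresses are computed.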
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
using AccessType = AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
PredicatedTileAccessIterator<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType>;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileIterator;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) {}
/// Default constructor
Params() = default;
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Default constructor
PredicatedTileIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset(make_Coord(0, 1));
else
address_iterator_.add_tile_offset(make_Coord(1, 0));
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator operator++(int) {
PredicatedTileIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, address_iterator_.valid());
++address_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_byte_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
if (address_iterator_.valid()) {
*access_ptr = frag_ptr[idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_byte_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for affine rank 2 column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize
>
class PredicatedTileIterator<Shape_, Element_, layout::AffineRank2ColumnMajor, AdvanceRank, ThreadMap_, AccessSize, false> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::AffineRankN<2>,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1)))
{}
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator operator++(int) {
PredicatedTileIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for affine rank 2 row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize
>
class PredicatedTileIterator<Shape_, Element_, layout::AffineRank2RowMajor, AdvanceRank, ThreadMap_, AccessSize, false> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::AffineRankN<2>,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))) {}
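    // The strides are passed in swapped order so they line up with the swapped
    // (column, row) coordinate mapping used when constructing the underlying
    // AffineRankN<2> iterator below.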
};
private:
//
// Data members
//
/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator operator++(int) {
PredicatedTileIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for column-major interleaved data. It is mapped
/// to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, int InterleavedK>
class PredicatedTileIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessSize, false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize>;
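  // Worked example (illustrative): for ColumnMajorInterleaved<32> and a 64x128
  // threadblock tile, the underlying pitch-linear tile is (64*32) x (128/32),
  // i.e. 2048 x 4; tensor extents and threadblock offsets are rescaled the same
  // way in the constructor below.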
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator operator++(int) {
PredicatedTileIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for row-major interleaved data. It is
/// mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, int InterleavedK>
class PredicatedTileIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessSize, false> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessSize>;
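  // Mirrors the column-major interleaved case with rows and columns exchanged: the
  // contiguous extent becomes Shape::kColumn * kInterleavedK and the strided extent
  // Shape::kRow / kInterleavedK.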
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default constructor
Params() = default;
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Default constructor
PredicatedTileIterator() = default;
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator operator++(int) {
PredicatedTileIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
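// Illustrative sketch (not part of the CUTLASS API): the constructor above maps a
// row-major matrix coordinate onto the underlying pitch-linear iterator by scaling
// the column by the interleaving factor and dividing the row by it. The helper below
// repeats that mapping in isolation for a hypothetical interleaving factor of 32.
CUTLASS_HOST_DEVICE
layout::PitchLinearCoord example_interleaved_coord_map(int row, int column) {
  int const kExampleInterleavedK = 32;  // hypothetical interleaving factor
  return layout::PitchLinearCoord(column * kExampleInterleavedK,
                                  row / kExampleInterleavedK);
}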
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 62,672 | C | 32.318979 | 125 | 0.66564 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Descriptor and precomputed parameters objects for predicated tile access iterators.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Predicated tile access iterator descriptor object containing template dependent state
struct PredicatedTileAccessIteratorDesc {
int element_size_bits;
int advance_rank;
layout::PitchLinearCoord threadblock_shape;
layout::PitchLinearCoord threadmap_iterations;
layout::PitchLinearCoord threadmap_delta;
//
// Methods
//
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc() { }
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc(
int element_size_bits_,
int advance_rank_,
layout::PitchLinearCoord threadblock_shape_,
layout::PitchLinearCoord threadmap_iterations_,
layout::PitchLinearCoord threadmap_delta_
):
element_size_bits(element_size_bits_),
advance_rank(advance_rank_),
threadblock_shape(threadblock_shape_),
threadmap_iterations(threadmap_iterations_),
threadmap_delta(threadmap_delta_)
{
#if 0
printf("PredicatedTileAccessIteratorDesc(%d, %d, {%d, %d}, {%d, %d}, {%d, %d}})\n",
element_size_bits,
advance_rank,
threadblock_shape.contiguous(), threadblock_shape.strided(),
threadmap_iterations.contiguous(), threadmap_iterations.strided(),
threadmap_delta.contiguous(), threadmap_delta.strided());
#endif
}
};
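// Illustrative sketch (not part of the CUTLASS API): a descriptor can also be built
// by hand with hypothetical values instead of going through the
// MakePredicatedTileAccessIteratorDesc helpers declared below. The numbers describe
// 16-bit elements that advance along the contiguous rank, a 64x8 pitch-linear
// threadblock tile, 4x2 iterations per thread, and per-iteration deltas of 16x4.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc make_example_desc() {
  return PredicatedTileAccessIteratorDesc(
      16,                                // element_size_bits
      0,                                 // advance_rank (0 == contiguous)
      layout::PitchLinearCoord(64, 8),   // threadblock_shape
      layout::PitchLinearCoord(4, 2),    // threadmap_iterations
      layout::PitchLinearCoord(16, 4));  // threadmap_delta
}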
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper template to construct a PredicatedTileAccessIteratorDesc from
/// template-dependent state
template <
typename Shape, typename Element, typename Layout,
int AdvanceRank, typename ThreadMap>
struct MakePredicatedTileAccessIteratorDesc;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for pitch-linear data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap> {
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return PredicatedTileAccessIteratorDesc(
sizeof_bits<Element>::value,
AdvanceRank,
{Shape::kContiguous, Shape::kStrided},
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for column-major data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::ColumnMajor, AdvanceRank, ThreadMap> {
static int const kAdvanceRank = AdvanceRank;
using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap>;
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return UnderlyingMakeOperator()();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for row-major data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::RowMajor, AdvanceRank, ThreadMap> {
static int const kAdvanceRank = AdvanceRank;
using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap>;
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return UnderlyingMakeOperator()();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for column-major interleaved data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap, int InterleavedK>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::ColumnMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap> {
static int const kAdvanceRank = AdvanceRank;
static int const kInterleavedK = InterleavedK;
using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap>;
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return UnderlyingMakeOperator()();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for row-major interleaved data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap, int InterleavedK>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::RowMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap> {
static int const kAdvanceRank = AdvanceRank;
static int const kInterleavedK = InterleavedK;
using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap>;
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return UnderlyingMakeOperator()();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Parameters struct
//
struct PredicatedTileAccessIteratorParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
/// stride of pitch-linear layout (units of Element)
LongIndex stride_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride, PredicatedTileAccessIteratorDesc desc) {
stride_ = stride;
inc_strided_ = (LongIndex(stride_) * desc.threadmap_delta.strided()) *
desc.element_size_bits / 8;
if (desc.advance_rank) {
// advance along strided dimension
inc_advance_ =
desc.threadblock_shape.strided() * LongIndex(stride_) * desc.element_size_bits / 8;
} else {
// advance along contiguous dimension
inc_advance_ = desc.threadblock_shape.contiguous() * desc.element_size_bits / 8;
}
inc_next_ = inc_advance_ - LongIndex(desc.threadmap_iterations.strided() - 1) *
desc.threadmap_delta.strided() * LongIndex(stride_) *
desc.element_size_bits / 8;
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Status initialize(Index stride, PredicatedTileAccessIteratorDesc desc) {
return initialize(LongIndex(stride), desc);
}
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorParams() {
initialize(LongIndex(0), PredicatedTileAccessIteratorDesc());
}
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorParams(Index stride, PredicatedTileAccessIteratorDesc desc) {
initialize(stride, desc);
}
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorParams(LongIndex stride, PredicatedTileAccessIteratorDesc desc) {
initialize(stride, desc);
}
};
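// Illustrative sketch (not part of the CUTLASS API): how the byte increments fall
// out of initialize() for a hypothetical descriptor (16-bit elements, contiguous
// advance, 64x8 tile, 4x2 iterations, 16x4 deltas) and a stride of 128 elements:
//   inc_strided_ = 128 * 4 * 16/8                 =  1024 bytes
//   inc_advance_ = 64 * 16/8                      =   128 bytes
//   inc_next_    = 128 - (2 - 1) * 4 * 128 * 16/8 =  -896 bytes
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorParams make_example_params() {
  PredicatedTileAccessIteratorDesc desc(
      16, 0,
      layout::PitchLinearCoord(64, 8),
      layout::PitchLinearCoord(4, 2),
      layout::PitchLinearCoord(16, 4));
  return PredicatedTileAccessIteratorParams(int64_t(128), desc);
}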
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 10,243 | C | 34.324138 | 101 | 0.647857 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates implementing the address computation for storing tiles
      from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap,
int Alignment =
sizeof_bits<Element>::value* ThreadMap::kElementsPerAccess / 8>
class RegularTileAccessIterator;
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 2,638 | C | 43.728813 | 100 | 0.633055 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing storing of tiles from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in length.
static int const kAccessSizeInBits = 128;
static_assert(
sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
/// Underlying iterator to compute the addresses
using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout,
kAdvanceRank, ThreadMap>;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: address_iterator_(ref, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
address_iterator_.add_tile_offset({0, 1});
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
address_iterator_.add_tile_offset(coord);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, Index byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
frag_ptr[access_idx] = *access_ptr;
++address_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, Index byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
*access_ptr = frag_ptr[access_idx];
++address_iterator_;
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_byte_offset(frag, 0);
}
};
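// Illustrative sketch (not part of the CUTLASS API): load_with_byte_offset and
// store_with_byte_offset above linearize the per-thread accesses with the contiguous
// iteration varying fastest, access_idx = c + s * Iterations::kContiguous. For a
// hypothetical 2 (contiguous) x 4 (strided) iteration space the fragment order is
// (c,s) = (0,0),(1,0),(0,1),(1,1),(0,2),(1,2),(0,3),(1,3).
CUTLASS_HOST_DEVICE
int example_access_index(int c, int s, int iterations_contiguous) {
  return c + s * iterations_contiguous;
}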
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element))>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element))>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
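// Illustrative sketch (not part of the CUTLASS API): the column-major and row-major
// wrappers differ only in how a matrix tile offset is forwarded to the pitch-linear
// iterator: column-major passes (row, column), row-major passes (column, row), as in
// add_tile_offset above. A hypothetical standalone form of the row-major mapping:
CUTLASS_HOST_DEVICE
layout::PitchLinearCoord example_row_major_tile_offset(int row_tiles, int column_tiles) {
  return layout::PitchLinearCoord(column_tiles, row_tiles);
}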
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileIterator<Shape_, Element_,
layout::TensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment =
Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
/// Underlying iterator to compute the addresses
using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout,
kAdvanceRank, ThreadMap>;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: address_iterator_(ref, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
address_iterator_.add_tile_offset(coord);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
address_iterator_.set_iteration_index(0);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset);
++address_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, Index byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
*access_ptr = frag_ptr[access_idx];
++address_iterator_;
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileIterator<Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row-major k-interleaved arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int InterleavedK, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value,
InterleavedK>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value,
InterleavedK>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment =
Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
/// Underlying iterator to compute the addresses
using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout,
kAdvanceRank, ThreadMap>;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: address_iterator_(ref, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
address_iterator_.add_pointer_offset(Shape::kCount);
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
address_iterator_.add_pointer_offset(coord.contiguous() * Shape::kCount);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
address_iterator_.set_iteration_index(0);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset);
++address_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
*(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx];
++address_iterator_;
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
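// Illustrative sketch (not part of the CUTLASS API): unlike the congruous and
// crosswise specializations above, this interleaved iterator steps whole tiles with a
// raw pointer offset. For a hypothetical 64x32 tile, Shape::kCount == 2048, so
// operator++ advances the pointer by 2048 elements and add_tile_offset({n, s})
// advances by n * 2048 elements.
CUTLASS_HOST_DEVICE
int64_t example_interleaved_tile_pointer_offset(int contiguous_tiles) {
  int const kExampleShapeCount = 64 * 32;  // hypothetical Shape::kCount
  return int64_t(contiguous_tiles) * kExampleShapeCount;
}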
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column-major k-interleaved arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int InterleavedK, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::TensorOpMultiplicandColumnMajorInterleaved<sizeof_bits<Element_>::value,
InterleavedK>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandColumnMajorInterleaved<sizeof_bits<Element_>::value,
InterleavedK>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
cutlass::MatrixShape<Shape::kColumn, Shape::kRow>,
Element,
layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>,
(kAdvanceRank == 1 ? 0 : 1),
ThreadMap
>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.strided(), coord.contiguous()});
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 36,050 | C | 31.537004 | 116 | 0.655201 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/thread/matrix.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a matrix object intended for storing data in registers and operations within
a CUDA thread.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_coord.h"
namespace cutlass {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Per-thread matrix object storing a packed matrix
template <
  typename Element_,
  int Rows,
  int Columns,
  typename Layout_ = layout::RowMajor
>
class Matrix : public Array<Element_, Rows * Columns> {
public:
// Verify layout refers to a rank=2 matrix.
  static_assert(
    Layout_::kRank == 2,
    "Layout type must refer to a rank=2 matrix");
  /// Base type
  using Base = Array<Element_, Rows * Columns>;
/// Element type
using Element = Element_;
/// Number of rows
static int const kRows = Rows;
/// Number of columns
static int const kColumns = Columns;
/// Layout within the array
using Layout = Layout_;
/// Reference type to an element
using Reference = Element &;
/// Logical rank of tensor index space
static int const kRank = 2;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Stride type
using Stride = typename Layout::Stride;
  /// TensorRef to matrix object
  using TensorRef = TensorRef<Element, Layout>;
  /// TensorRef to constant matrix object
  using ConstTensorRef = typename TensorRef::ConstTensorRef;
  /// TensorView to matrix object
  using TensorView = TensorView<Element, Layout>;
  /// TensorView to constant matrix object
  using ConstTensorView = typename TensorView::ConstTensorView;
  /// Diagonal vector
  using Diagonal = Array<Element, (kRows < kColumns ? kRows : kColumns)>;
private:
public:
//
// Methods
//
/// Returns the size of the object
CUTLASS_HOST_DEVICE
static MatrixCoord extent() {
return make_Coord(kRows, kColumns);
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
static Layout layout() {
return Layout::packed(extent());
}
/// Ctor
CUTLASS_HOST_DEVICE
Matrix() { }
/// Ctor
CUTLASS_HOST_DEVICE
Matrix(Diagonal const &diag) {
// Todo - construct from diagonal
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
TensorRef ref() {
return TensorRef(this->data(), layout());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(this->data(), layout());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
TensorView view() {
return TensorView(ref(), extent());
}
/// Returns a TensorView to const data
CUTLASS_HOST_DEVICE
ConstTensorView const_view() const {
return ConstTensorView(const_ref(), extent());
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(MatrixCoord const& coord) const {
typename Base::size_type offset_(layout().offset(coord));
return Base::at(offset_);
}
/// Returns the number of scalar elements needed to store tensor.
CUTLASS_HOST_DEVICE
LongIndex capacity() const {
return LongIndex(Base::size());
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Column vector defined as a matrix with exactly one column
template <
typename Element,
int Rows,
typename Layout = layout::ColumnMajor
>
using ColumnVector = Matrix<Element, Rows, 1, Layout>;
/// Row vector defined as a matrix with exactly one row
template <
typename Element,
int Columns,
typename Layout = layout::RowMajor
>
using RowVector = Matrix<Element, 1, Columns, Layout>;
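// Illustrative sketch (not part of the CUTLASS API): a per-thread 2x2 row-major
// Matrix of float is a packed Array of four elements, so element (r, c) lives at
// index r * kColumns + c. The hypothetical helper below reads the diagonal that way.
CUTLASS_HOST_DEVICE
float example_trace_2x2(Matrix<float, 2, 2> const &m) {
  return m[0 * 2 + 0] + m[1 * 2 + 1];
}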
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace cutlass
| 5,931 | C | 28.66 | 100 | 0.658742 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/threadblock_swizzle.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines functors for mapping blockIdx to partitions of the batched reduction computation.
*/
#pragma once
#include "cutlass/coord.h"
namespace cutlass {
namespace reduction {
struct DefaultBlockSwizzle {
/// Ctor
CUTLASS_HOST_DEVICE DefaultBlockSwizzle() {}
/// Swizzle the block index.
CUTLASS_DEVICE dim3 swizzle() { return blockIdx; }
///
CUTLASS_HOST_DEVICE dim3 get_grid_layout(Coord<3> const &problem_size,
Coord<3> const &OutputTile) {
assert(OutputTile[0] == 1 && OutputTile[1] == 1);
assert((problem_size[0] * problem_size[1] * problem_size[2]) % OutputTile[2] == 0);
dim3 grid;
grid.x = problem_size[0] * problem_size[1] * problem_size[2]
/ OutputTile[2] ;
return grid;
}
///
CUTLASS_DEVICE Coord<3> get_threadblock_offset(Coord<3> const &SubTile) {
assert(SubTile[0] == 1 && SubTile[1] == 1);
dim3 block = swizzle();
Coord<3> threadblock_offset =
make_Coord(0, 0, block.x * SubTile[2]);
return threadblock_offset;
}
};
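// Illustrative sketch (not part of the CUTLASS API): for a hypothetical problem
// covering 2 * 1 * 4096 elements where each threadblock reduces an OutputTile of 256
// elements, the swizzle yields a 1-D grid of 8192 / 256 = 32 blocks, and block b
// starts its partition at offset b * 256 along the third dimension.
CUTLASS_HOST_DEVICE
dim3 example_reduction_grid() {
  DefaultBlockSwizzle swizzle;
  return swizzle.get_grid_layout(make_Coord(2, 1, 4096), make_Coord(1, 1, 256));
}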
} // namespace reduction
} // namespace cutlass
| 2,936 | C | 42.191176 | 100 | 0.6703 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/thread/reduction_operators.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Functors implementing thread-level reduction operations over densely packed tensors
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mixed-precision reduction
template <
typename ElementAccumulator_,
typename Element_,
int Count = 1
>
struct ReduceAdd {
//
// Type definitions
//
using ElementAccumulator = ElementAccumulator_;
using Element = Element_;
static int const kCount = Count;
using FragmentAccumulator = cutlass::Array<ElementAccumulator, kCount>;
using FragmentElement = cutlass::Array<Element, kCount>;
struct Params { };
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
ReduceAdd(Params params_ = Params()): params(params_) { }
/// Operator
CUTLASS_HOST_DEVICE
FragmentAccumulator operator()(
FragmentAccumulator accumulator,
FragmentElement element) const {
plus<FragmentAccumulator> op;
NumericArrayConverter<
ElementAccumulator,
Element,
kCount,
PreferredRoundingMode<ElementAccumulator, Element>::kRound> converter;
return op(accumulator, converter(element));
}
};
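// Minimal usage sketch (illustrative; assumes the surrounding kernel loads 'frag'):
// accumulating a half-precision fragment into a float accumulator with ReduceAdd.
//
//   using Reduce = cutlass::reduction::thread::ReduceAdd<float, cutlass::half_t, 4>;
//   Reduce reduce_op;
//   Reduce::FragmentAccumulator acc;    // Array<float, 4>
//   Reduce::FragmentElement frag;       // Array<half_t, 4>
//   acc.clear();                        // zero-initialize the accumulator
//   /* ... load frag from memory ... */
//   acc = reduce_op(acc, frag);         // converts frag to float, then adds element-wise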
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Special handling for binary operators
template <typename ReductionOp, typename Element, int N>
struct VectorizeArrayOperation {
using ValueType = Array<Element, N>;
CUTLASS_HOST_DEVICE
ValueType operator()(
ReductionOp const &reduction_op,
ValueType const &lhs,
ValueType const &rhs) const {
ValueType result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = reduction_op(lhs[i], rhs[i]);
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ReductionOp, typename Element, int N>
struct ReduceArrayOperation {
using ArrayType = Array<Element, N>;
CUTLASS_HOST_DEVICE
Element operator()(
ReductionOp const &reduction_op,
ArrayType const &array) const {
Element item = reduction_op(array[0], array[1]);
CUTLASS_PRAGMA_UNROLL
for (int i = 2; i < N; ++i) {
item = reduction_op(item, array[i]);
}
return item;
}
};
template <int N>
struct ReduceArrayOperation<logical_and<uint1b_t>, uint1b_t, N> {
using ArrayType = Array<uint1b_t, N>;
CUTLASS_HOST_DEVICE
uint1b_t operator()(
logical_and<uint1b_t> const &reduction_op,
ArrayType const &array) const {
uint8_t const *ptr = reinterpret_cast<uint8_t const *>(&array);
bool item = false;
CUTLASS_PRAGMA_UNROLL
for (int byte = 0; byte < (N + 7) / 8; ++byte) {
uint8_t bits = ptr[byte];
item = (item || !bits);
}
return uint1b_t(!item);
}
};
template <int N>
struct ReduceArrayOperation<logical_or<uint1b_t>, uint1b_t, N> {
using ArrayType = Array<uint1b_t, N>;
CUTLASS_HOST_DEVICE
uint1b_t operator()(
    logical_or<uint1b_t> const &reduction_op,
ArrayType const &array) const {
uint8_t const *ptr = reinterpret_cast<uint8_t const *>(&array);
    bool item = false;
CUTLASS_PRAGMA_UNROLL
for (int byte = 0; byte < (N + 7) / 8; ++byte) {
uint8_t bits = ptr[byte];
item = (item || bits);
}
return uint1b_t(item);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper function to infer template argument types
template <typename ReductionOp, typename Element, int N>
CUTLASS_HOST_DEVICE
Array<Element, N> ApplyArrayOperator(
ReductionOp const &reduction_op,
Array<Element, N> const &lhs,
Array<Element, N> const &rhs) {
VectorizeArrayOperation<ReductionOp, Element, N> vectorize_op;
return vectorize_op(reduction_op, lhs, rhs);
}
/// Helper to reduce an array
template <typename ReductionOp, typename Element, int N>
CUTLASS_HOST_DEVICE
Element ReduceArray(ReductionOp const &reduction_op, Array<Element, N> const &array) {
ReduceArrayOperation<ReductionOp, Element, N> reduce_array_op;
return reduce_array_op(reduction_op, array);
}
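// Minimal usage sketch of the helpers above (illustrative, assuming float data):
// combine two fragments element-wise, then collapse one fragment to a scalar.
//
//   cutlass::plus<float> op;
//   cutlass::Array<float, 8> a, b;
//   /* ... fill a and b ... */
//   cutlass::Array<float, 8> c = detail::ApplyArrayOperator(op, a, b); // c[i] = a[i] + b[i]
//   float total = detail::ReduceArray(op, c);                          // sum of the 8 lanes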
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,790 | C | 27.775424 | 100 | 0.602504 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/thread/reduce.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic thread level reduction with specializations for Array<T, N>.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
#include "cutlass/functional.h"
namespace cutlass {
namespace reduction {
namespace thread {
/// Structure to compute the thread level reduction
template <typename Op, typename T>
struct Reduce;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization of Reduce for "plus" (a functional operator)
template <typename T>
struct Reduce< plus<T>, T > {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
plus<T> _op;
return _op(lhs, rhs);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization of Reduce for Array<T, N>
template <typename T, int N>
struct Reduce < plus<T>, Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, 1> operator()(Array<T, N> const &in) const {
Array<T, 1> result;
Reduce< plus<T>, T > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], in[i]);
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specializations of Reduce for Array<half_t, N>
template <int N>
struct Reduce < plus<half_t>, Array<half_t, N> > {
CUTLASS_HOST_DEVICE
Array<half_t, 1> operator()(Array<half_t, N> const &input) {
Array<half_t, 1> result;
// If there is only 1 element - there is nothing to reduce
if( N ==1 ){
result[0] = input.front();
} else {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
__half result_d;
Array<half_t, 1> const *in_ptr_half = reinterpret_cast<Array<half_t, 1> const *>(&input);
Array<half_t, 2> const *in_ptr_half2 = reinterpret_cast<Array<half_t, 2> const *>(&input);
__half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2);
// Set initial result = first half2, in case N==2
__half2 tmp_result = x_in_half2[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < N/2; ++i) {
tmp_result = __hadd2(x_in_half2[i], tmp_result);
}
result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result));
// One final step is needed for odd "N" (to add the (N-1)th element)
if( N%2 ){
__half last_element;
Array<half_t, 1> tmp_last;
Array<half_t, 1> *tmp_last_ptr = &tmp_last;
tmp_last_ptr[0] = in_ptr_half[N-1];
last_element = reinterpret_cast<__half const &>(tmp_last);
result_d = __hadd(result_d, last_element);
}
Array<half_t, 1> *result_ptr = &result;
*result_ptr = reinterpret_cast<Array<half_t, 1> &>(result_d);
#else
Reduce< plus<half_t>, half_t > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], input[i]);
}
#endif
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specializations of Reduce for AlignedArray<half_t, N>
template <int N>
struct Reduce < plus<half_t>, AlignedArray<half_t, N> > {
CUTLASS_HOST_DEVICE
Array<half_t, 1> operator()(AlignedArray<half_t, N> const &input) {
Array<half_t, 1> result;
// If there is only 1 element - there is nothing to reduce
if( N ==1 ){
result[0] = input.front();
} else {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
__half result_d;
AlignedArray<half_t, 1> const *in_ptr_half = reinterpret_cast<AlignedArray<half_t, 1> const *>(&input);
AlignedArray<half_t, 2> const *in_ptr_half2 = reinterpret_cast<AlignedArray<half_t, 2> const *>(&input);
__half2 const *x_in_half2 = reinterpret_cast<__half2 const *>(in_ptr_half2);
// Set initial result = first half2, in case N==2
__half2 tmp_result = x_in_half2[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < N/2; ++i) {
tmp_result = __hadd2(x_in_half2[i], tmp_result);
}
result_d = __hadd(__low2half(tmp_result), __high2half(tmp_result));
// One final step is needed for odd "N" (to add the (N-1)th element)
if( N%2 ){
__half last_element;
AlignedArray<half_t, 1> tmp_last;
AlignedArray<half_t, 1> *tmp_last_ptr = &tmp_last;
tmp_last_ptr[0] = in_ptr_half[N-1];
last_element = reinterpret_cast<__half const &>(tmp_last);
result_d = __hadd(result_d, last_element);
}
Array<half_t, 1> *result_ptr = &result;
*result_ptr = reinterpret_cast<Array<half_t, 1> &>(result_d);
#else
Reduce< plus<half_t>, half_t > scalar_reduce;
result.clear();
CUTLASS_PRAGMA_UNROLL
for (auto i = 0; i < N; ++i) {
result[0] = scalar_reduce(result[0], input[i]);
}
#endif
}
return result;
}
};
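// Minimal usage sketch (illustrative): collapsing a register fragment to a single
// half_t using the specializations above. On SM60 and newer the reduction uses
// paired __hadd2 instructions; elsewhere it falls back to the scalar loop.
//
//   cutlass::Array<cutlass::half_t, 8> frag;
//   /* ... fill frag ... */
//   cutlass::reduction::thread::Reduce<
//       cutlass::plus<cutlass::half_t>,
//       cutlass::Array<cutlass::half_t, 8>> reduce;
//   cutlass::Array<cutlass::half_t, 1> sum = reduce(frag);   // sum[0] holds the total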
} // namespace thread
} // namespace reduction
} // namespace cutlass
| 7,208 | C | 29.676596 | 112 | 0.56576 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_strided.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/kernel/tensor_reduce_affine_strided.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tensor reduction operator on layouts which are affine
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput_,
typename ElementSource_,
typename ReductionOp_,
int VectorLength = 1,
typename ElementCompute_ = ElementOutput_,
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineStrided {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using ReductionOp = ReductionOp_;
using ElementCompute = ElementCompute_;
//
// Data members
//
/// Internal status field
Status status;
/// Extent of tensor in source layout
Coord<kRank> extent;
/// Number of points in the outer index space
int64_t outer_count;
/// Number of elements in the inner index space
int64_t inner_count;
/// Number of workspaces needed
int workspace_count;
/// CUDA Grid shape (.x => contiguous, .y => outer, .z => inner)
dim3 grid_shape;
/// CUDA Threadblock shape (.x => contiguous, .y => outer, .z => inner)
dim3 threadblock_shape;
/// CUDA grid shape for the final reduction step if needed
dim3 grid_final;
/// CUDA threadblock shape for the final reduction step if needed
dim3 threadblock_final;
private:
//
// Methods
//
/// Helper to reshape 'count' such that it is less than 2 x 'ext'
static int reshape_pow2(int ext, int count) {
if (ext > count) {
return 1;
}
int x = 1;
for (; count >= ext * 2; ) {
count >>= 1;
x <<= 1;
}
return x;
}
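  // Worked example (illustrative): with ext = 48 and count = 256, the loop halves
  // 'count' while it remains at least 2 * ext (256 -> 128 -> 64) and returns x = 4,
  // so the caller may split the 256 threads four ways while each slice of 64 still
  // covers the 48-wide extent.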
public:
/// Default ctor
TensorReductionAffineStrided():
status(Status::kErrorInvalidProblem),
extent(),
outer_count(0),
inner_count(0),
workspace_count(0),
grid_shape(0, 0, 0),
threadblock_shape(0, 0, 0) { }
/// Constructor
TensorReductionAffineStrided(
Coord<kRank> extent_,
int target_threadblock_count = 128
):
status(Status::kSuccess),
extent(extent_),
outer_count(0),
inner_count(0),
workspace_count(0) {
//
// Plan the parallel mapping strategy.
//
outer_count = 1;
inner_count = 1;
// Compute number of elements in strided ranks
for (int p = 0; p < kReducedRank - 1; ++p) {
outer_count *= extent[p];
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= extent[kReducedRank + p - 1];
}
// Compute plan for the reduction
int extent_c = extent[kRank - 1];
int vectors_c = (extent_c -1 + kVectorLength) / kVectorLength;
// Determine CTA shape
int cta_width = kThreads * kVectorLength;
int cta_ways = reshape_pow2(extent_c, cta_width);
int cta_threads_x = kThreads / cta_ways;
threadblock_shape = dim3(cta_threads_x, 1, std::min(cta_ways, 64));
    // Guard against an unsupported configuration: a threadblock that cooperates along z must have y == 1.
if (threadblock_shape.z > 1) {
if (threadblock_shape.y != 1) {
status = Status::kErrorInternal;
return;
}
}
// Determine grid shape
int cta_count_x = (vectors_c + cta_threads_x - 1) / cta_threads_x;
int cta_count_y = std::max(1, target_threadblock_count / cta_count_x);
// Limit the number of CTAs assigned to outer dimension
if (int64_t(cta_count_y * threadblock_shape.y) > outer_count) {
cta_count_y = int(outer_count + threadblock_shape.y - 1) / threadblock_shape.y;
}
// Limit the number of CTAs assigned to inner dimension
int cta_count_z = std::max(1, target_threadblock_count / cta_count_y);
if (int64_t(cta_count_z * threadblock_shape.z) > inner_count) {
cta_count_z = int(inner_count + threadblock_shape.z - 1) / threadblock_shape.z;
}
grid_shape = dim3(cta_count_x, cta_count_y, cta_count_z);
workspace_count = (cta_count_z > 1 ? cta_count_z : 0);
// Determine shape of final reduction kernel if needed
grid_final = dim3(cta_count_x, int(outer_count));
threadblock_final = dim3(cta_threads_x, 1, 1);
}
/// Simple check to verify the object is initialized correctly
bool good() const {
return status == Status::kSuccess;
}
/// Size of one CTA's workspace
int64_t workspace_stride() const {
// Error condition
if (!good()) {
return 0;
}
int vector_size_bytes = kVectorLength * sizeof_bits<ElementCompute>::value / 8;
return extent[kRank - 1] * vector_size_bytes;
}
/// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs
int64_t workspace_size() const {
// Error condition
if (!good()) {
return 0;
}
// No reduction across CTAs
if (grid_shape.z == 1) {
return 0;
}
return workspace_stride() * outer_count * grid_shape.z;
}
/// Performs a reduction
Status reduce(
ElementOutput *dst_ptr, ///< Pointer to destination tensor
int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1)
ElementSource const *src_ptr, ///< Pointer to source tensor
int64_t src_stride[], ///< Stride vector (of length kRank - 1)
void *device_workspace_ptr = nullptr, ///< Device workspace
    ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity
ReductionOp reduction_op = ReductionOp(), ///< Reduction operator
cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched
// Initial status check
if (!good()) {
return status;
}
// Guard against null workspace
if (workspace_count > 1 && device_workspace_ptr == nullptr) {
return Status::kErrorWorkspaceNull;
}
// Define reduction kernel
using ReductionKernel = kernel::TensorReductionAffineStrided<
kRank,
kReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
kVectorLength,
ElementCompute,
kThreads>;
using FinalReductionKernel = kernel::TensorReductionAffineStridedFinal<
kRank,
kReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
kVectorLength,
ElementCompute,
kThreads>;
using Params = typename ReductionKernel::Params;
// Construct the parameters
Params params(
extent,
dst_ptr,
dst_stride,
src_ptr,
src_stride,
static_cast<ElementCompute *>(device_workspace_ptr),
workspace_stride(),
workspace_count,
reduction_op,
reduction_identity);
// Shared memory size
int shared_mem_bytes = sizeof(typename ReductionKernel::SharedStorage);
// Launch the kernel
Kernel<ReductionKernel><<< grid_shape, threadblock_shape, shared_mem_bytes, stream >>>(params);
// Check error condition
if (cudaPeekAtLastError() == cudaSuccess) {
status = Status::kSuccess;
}
else {
status = Status::kErrorInternal;
}
// Final reduction kernel
if (workspace_count) {
Kernel<FinalReductionKernel><<< grid_final, threadblock_final, 0, stream >>>(params);
// Check error condition
if (cudaPeekAtLastError() == cudaSuccess) {
status = Status::kSuccess;
}
else {
status = Status::kErrorInternal;
}
}
return status;
}
/// Helper to use overloaded function call operator
Status operator()(
ElementOutput *dst_ptr, ///< Pointer to destination tensor
int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1)
ElementSource const *src_ptr, ///< Pointer to source tensor
int64_t src_stride[], ///< Stride vector (of length kRank - 1)
void *device_workspace_ptr = nullptr, ///< Pointer to device workspace
    ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity
ReductionOp reduction_op = ReductionOp(), ///< Reduction operator
cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched
return reduce(
dst_ptr,
dst_stride,
src_ptr,
src_stride,
device_workspace_ptr,
reduction_identity,
reduction_op,
stream);
}
};
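// Minimal usage sketch (illustrative; extent, pointers and strides are assumed to be
// set up by the caller): reducing one strided rank of a rank-4 tensor. In a
// Rank=4 / ReducedRank=3 instantiation the reduced index is extent[2]; strides are
// passed in units of elements, and a device workspace is needed only when
// workspace_size() is non-zero.
//
//   using Reduction = cutlass::reduction::device::TensorReductionAffineStrided<
//       4, 3, float, float, cutlass::plus<float>>;
//
//   Reduction reduction(extent);                      // extent: cutlass::Coord<4>
//   int64_t workspace_bytes = reduction.workspace_size();
//   /* ... allocate 'workspace' on the device if workspace_bytes != 0 ... */
//   cutlass::Status status = reduction.reduce(
//       dst_ptr, dst_stride,                          // dst_stride: int64_t[2]
//       src_ptr, src_stride,                          // src_stride: int64_t[3]
//       workspace, /*reduction_identity=*/0.0f);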
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,448 | C | 30.627072 | 109 | 0.611635 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_contiguous.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tensor reduction operator on layouts which are affine
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (e.g. ND => 2)
typename ElementOutput_,
typename ElementSource_,
typename ReductionOp_,
int VectorLength = 1,
typename ElementCompute_ = ElementOutput_,
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineContiguous {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using ReductionOp = ReductionOp_;
using ElementCompute = ElementCompute_;
//
// Data members
//
/// Internal status field
Status status;
/// Extent of tensor in source layout
Coord<kRank> extent;
/// Number of points in the outer index space
int64_t outer_count;
/// Number of elements in the inner index space
int64_t inner_count;
/// Number of workspaces needed
int workspace_count;
/// CUDA Grid shape (.x => contiguous, .y => outer, .z => inner)
dim3 grid_shape;
/// CUDA Threadblock shape (.x => contiguous, .y => outer, .z => inner)
dim3 threadblock_shape;
/// CUDA grid shape for the final reduction step if needed
dim3 grid_final;
/// CUDA threadblock shape for the final reduction step if needed
dim3 threadblock_final;
private:
//
// Methods
//
/// Helper to reshape 'count' such that it is less than 2 x 'ext'
static int reshape_pow2(int ext, int count) {
if (ext > count) {
return 1;
}
int x = 1;
for (; count >= ext * 2; ) {
count >>= 1;
x <<= 1;
}
return x;
}
public:
/// Default ctor
TensorReductionAffineContiguous():
status(Status::kErrorInvalidProblem),
extent(),
outer_count(0),
inner_count(0),
workspace_count(0),
grid_shape(0, 0, 0),
threadblock_shape(0, 0, 0) { }
/// Constructor
TensorReductionAffineContiguous(
Coord<kRank> extent_,
int target_threadblock_count = 128
):
status(Status::kSuccess),
extent(extent_),
outer_count(0),
inner_count(0),
workspace_count(0) {
//
// Plan the parallel mapping strategy.
//
outer_count = 1;
inner_count = 1;
// Compute number of elements in strided ranks
for (int p = 0; p < kReducedRank; ++p) {
outer_count *= extent[p];
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= extent[kReducedRank + p];
}
int cta_count_x = 1;
int cta_count_y = 1;
int cta_count_z = 1;
int cta_threads_x = kThreads;
int cta_threads_y = 1;
int cta_threads_z = 1;
// Determine CTA shape
int64_t inner_vector_count = inner_count / kVectorLength;
// Priority 1. Assign threadblocks to outer indices if possible
if (outer_count > target_threadblock_count) {
cta_count_x = 1;
cta_count_y = target_threadblock_count;
cta_count_z = 1;
}
else {
cta_count_y = int(outer_count);
int remaining_ctas = target_threadblock_count / cta_count_y;
// Priority 2. Assign inner dimensions to one CTA
if (inner_vector_count > cta_threads_x) {
int64_t cta_z_bound = inner_vector_count / cta_threads_x;
if (cta_z_bound > remaining_ctas) {
cta_count_z = remaining_ctas;
}
else {
cta_count_z = int(cta_z_bound);
}
}
else {
cta_threads_x = reshape_pow2(int(inner_vector_count), cta_threads_x);
cta_count_z = 1;
}
}
grid_shape = dim3(cta_count_x, cta_count_y, cta_count_z);
threadblock_shape = dim3(cta_threads_x, cta_threads_y, cta_threads_z);
workspace_count = (cta_count_z > 1 ? cta_count_z : 0);
// Determine shape of final reduction kernel if needed
if (workspace_count) {
int final_threads = kThreads;
int final_ctas = 1;
if (outer_count > kThreads) {
final_ctas = int(outer_count + kThreads - 1) / kThreads;
}
else {
final_threads = int(outer_count);
}
grid_final = dim3(final_ctas, 1, 1);
threadblock_final = dim3(final_threads, 1, 1);
}
else {
grid_final = dim3(0, 0, 0);
threadblock_final = dim3(0, 0, 0);
}
}
/// Simple check to verify the object is initialized correctly
bool good() const {
return status == Status::kSuccess;
}
/// Size (in bytes) of <outer_count> workspace elements which are densely packed together
int64_t workspace_stride() const {
// Error condition
if (!good()) {
return 0;
}
return outer_count * sizeof_bits<ElementCompute>::value / 8;
}
/// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs
int64_t workspace_size() const {
// Error condition
if (!good()) {
return 0;
}
// No reduction across CTAs
if (grid_shape.z == 1) {
return 0;
}
return workspace_stride() * grid_shape.z;
}
/// Performs a reduction
Status reduce(
ElementOutput *dst_ptr, ///< Pointer to destination tensor
int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1)
ElementSource const *src_ptr, ///< Pointer to source tensor
int64_t src_stride[], ///< Stride vector (of length kRank - 1)
void *device_workspace_ptr = nullptr, ///< Device workspace
ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity element
ReductionOp reduction_op = ReductionOp(), ///< Reduction operator
cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched
// Initial status check
if (!good()) {
return status;
}
// Guard against null workspace
if (workspace_count > 1 && device_workspace_ptr == nullptr) {
return Status::kErrorWorkspaceNull;
}
// Define reduction kernel
using ReductionKernel = kernel::TensorReductionAffineContiguous<
kRank,
kReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
kVectorLength,
ElementCompute,
kThreads>;
using FinalReductionKernel = kernel::TensorReductionAffineContiguousFinal<
kRank,
kReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
kVectorLength,
ElementCompute,
kThreads>;
using Params = typename ReductionKernel::Params;
// Construct the parameters
Params params(
extent,
dst_ptr,
dst_stride,
src_ptr,
src_stride,
static_cast<ElementCompute *>(device_workspace_ptr),
workspace_stride(),
workspace_count,
reduction_op,
reduction_identity);
// Shared memory size
int shared_mem_bytes = sizeof(typename ReductionKernel::SharedStorage);
// Launch the kernel
Kernel<ReductionKernel><<< grid_shape, threadblock_shape, shared_mem_bytes, stream >>>(params);
// Check error condition
if (cudaPeekAtLastError() == cudaSuccess) {
status = Status::kSuccess;
}
else {
status = Status::kErrorInternal;
}
// Final reduction kernel
if (workspace_count) {
Kernel<FinalReductionKernel><<< grid_final, threadblock_final, 0, stream >>>(params);
}
// Check error condition
if (cudaPeekAtLastError() == cudaSuccess) {
status = Status::kSuccess;
}
else {
status = Status::kErrorInternal;
}
return status;
}
/// Helper to use overloaded function call operator
Status operator()(
ElementOutput *dst_ptr, ///< Pointer to destination tensor
int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1)
ElementSource const *src_ptr, ///< Pointer to source tensor
int64_t src_stride[], ///< Stride vector (of length kRank - 1)
void *device_workspace_ptr = nullptr, ///< Pointer to device workspace
ElementCompute reduction_identity = ElementCompute(), ///< Reduction identity element
ReductionOp reduction_op = ReductionOp(), ///< Reduction operator
cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched
return reduce(dst_ptr, dst_stride, src_ptr, src_stride, device_workspace_ptr, reduction_identity, reduction_op, stream);
}
};
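// Note (illustrative): usage mirrors TensorReductionAffineStrided, but here the
// reduced index space includes the contiguous (fastest-varying) rank, i.e. extent[3]
// in a Rank=4 / ReducedRank=3 instantiation, so vectorized loads run along the rank
// being reduced rather than along a preserved output rank.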
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,579 | C | 29.962567 | 124 | 0.60817 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/device/reduce_split_k.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over densely packed tensors in global memory
*/
#pragma once
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ReductionKernel_
>
class ReduceSplitK {
public:
using ReductionKernel = ReductionKernel_;
using Shape = typename ReductionKernel::Shape;
using ReductionOp = typename ReductionKernel::ReductionOp;
using OutputOp = typename ReductionKernel::OutputOp;
using ElementWorkspace = typename ReductionKernel::ElementWorkspace;
using ElementAccumulator = typename ReductionKernel::ElementAccumulator;
using ElementOutput = typename ReductionKernel::ElementOutput;
using WorkspaceTensorRef = typename ReductionKernel::WorkspaceTensorRef;
using OutputTensorRef = typename ReductionKernel::OutputTensorRef;
using StrideIndex = typename ReductionKernel::StrideIndex;
/// Argument structure
struct Arguments {
//
// Data members
//
MatrixCoord problem_size;
int partitions;
size_t partition_stride;
WorkspaceTensorRef workspace;
OutputTensorRef destination;
OutputTensorRef source;
typename OutputOp::Params output;
typename ReductionOp::Params reduction;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() :
problem_size(0, 0),
partitions(1),
partition_stride(0) { }
CUTLASS_HOST_DEVICE
Arguments(
MatrixCoord const & problem_size
):
problem_size(problem_size) { }
CUTLASS_HOST_DEVICE
Arguments(
MatrixCoord problem_size_,
int partitions_,
size_t partition_stride_,
WorkspaceTensorRef workspace_,
OutputTensorRef destination_,
OutputTensorRef source_,
typename OutputOp::Params output_ = typename OutputOp::Params(),
typename ReductionOp::Params reduction_ = typename ReductionOp::Params()
):
problem_size(problem_size_),
partitions(partitions_),
partition_stride(partition_stride_),
workspace(workspace_),
destination(destination_),
source(source_),
output(output_),
reduction(reduction_)
{
}
};
private:
/// Kernel parameters object
typename ReductionKernel::Params params_;
public:
/// Constructs Reduction SplitK
ReduceSplitK() { }
/// Determines whether the ReduceSplitK can execute the given problem.
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
// needs no additional workspace
return 0;
}
/// Initializes Reduction state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
// initialize the params structure from the arguments
params_ = typename ReductionKernel::Params(
args.problem_size,
args.partitions,
args.partition_stride,
args.workspace,
args.destination,
args.source,
args.output,
args.reduction
);
return Status::kSuccess;
}
/// Initializes Reduction kernel state from arguments.
Status update(Arguments const &args, void *workspace = nullptr) {
// update the params structure from the arguments
params_.workspace.reset(args.workspace.non_const_ref().data());
params_.destination.reset(args.destination.non_const_ref().data());
params_.source.reset(args.source.non_const_ref().data());
params_.output = args.output;
params_.reduction = args.reduction;
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
//
// Launch reduction kernel
//
dim3 block = ReductionKernel::block_shape();
dim3 grid = ReductionKernel::grid_shape(params_.problem_size);
Kernel<ReductionKernel><<< grid, block, 0, stream >>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
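// Minimal usage sketch (illustrative; 'ReductionKernel' is assumed to be a concrete
// kernel-level instantiation defined elsewhere): driving the split-K reduction from
// host code.
//
//   using Reduction = cutlass::reduction::device::ReduceSplitK<ReductionKernel>;
//
//   Reduction reduction_op;
//   Reduction::Arguments args(
//       problem_size,       // MatrixCoord {M, N} of the output
//       partitions,         // number of split-K partitions to accumulate
//       partition_stride,   // elements between consecutive partitions in the workspace
//       workspace_ref,      // WorkspaceTensorRef holding the partial results
//       destination_ref,    // OutputTensorRef written with the reduced output
//       source_ref);        // OutputTensorRef supplying the epilogue source operand
//   cutlass::Status status = reduction_op(args);      // initialize() followed by run()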
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reduction
} // namespace cutlass
| 6,823 | C | 29.464286 | 100 | 0.656456 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/device/tensor_reduce.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/device/tensor_reduce_affine_strided.h"
#include "cutlass/reduction/device/tensor_reduce_affine_contiguous.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tensor reduction operator on specific CUTLASS layouts over exactly one index
template <
typename ElementOutput_,
typename ElementSource_,
typename Layout_,
typename ReductionOp_,
int VectorLength_ = 1,
typename ElementCompute_ = ElementOutput_
>
struct TensorReduction {
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using Layout = Layout_;
using ReductionOp = ReductionOp_;
static int const kVectorLength = VectorLength_;
using ElementCompute = ElementCompute_;
using TensorCoord = typename Layout::TensorCoord;
/// Reduction operator
using ReductionDeviceStridedOperator = TensorReductionAffineStrided<
4, 3, ElementOutput, ElementSource, ReductionOp, kVectorLength, ElementCompute
>;
using ReductionDeviceContiguousOperator = TensorReductionAffineContiguous<
4, 3, ElementOutput, ElementSource, ReductionOp, kVectorLength, ElementCompute
>;
//
// Data members
//
ReductionDeviceStridedOperator reduction_strided;
ReductionDeviceContiguousOperator reduction_contiguous;
int reduction_index;
//
// Methods
//
///
TensorReduction(
TensorCoord extent,
int reduction_index_
):
reduction_index(reduction_index_) {
Coord<4> extent_affine;
switch (reduction_index) {
case 0:
extent_affine[0] = extent[1];
extent_affine[1] = extent[2];
extent_affine[2] = extent[0];
extent_affine[3] = extent[3];
break;
case 1:
extent_affine[0] = extent[0];
extent_affine[1] = extent[2];
extent_affine[2] = extent[1];
extent_affine[3] = extent[3];
break;
case 2:
extent_affine[0] = extent[0];
extent_affine[1] = extent[1];
extent_affine[2] = extent[2];
extent_affine[3] = extent[3];
break;
case 3:
extent_affine[0] = extent[0];
extent_affine[1] = extent[1];
extent_affine[2] = extent[2];
extent_affine[3] = extent[3];
break;
default: break;
}
if (reduction_index == 3) {
reduction_contiguous = ReductionDeviceContiguousOperator(extent_affine);
}
else {
reduction_strided = ReductionDeviceStridedOperator(extent_affine);
}
}
/// Simple check to verify the object is initialized correctly
bool good() const {
if (reduction_index == 3) {
return reduction_contiguous.good();
}
return reduction_strided.good();
}
/// Size of one workspace
int64_t workspace_stride() const {
if (reduction_index == 3) {
return reduction_contiguous.workspace_stride();
}
else {
return reduction_strided.workspace_stride();
}
}
/// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs
int64_t workspace_size() const {
if (reduction_index == 3) {
return reduction_contiguous.workspace_size();
}
else {
return reduction_strided.workspace_size();
}
}
/// Helper to use overloaded function call operator
Status reduce(
TensorRef<ElementOutput, Layout> dst_ref,
TensorRef<ElementSource, Layout> src_ref,
void *device_workspace_ptr = nullptr,
ElementCompute reduction_identity = ElementCompute(),
ReductionOp reduction_op = ReductionOp(),
cudaStream_t stream = nullptr) {
int64_t src_stride[3];
int64_t dst_stride[3];
switch (reduction_index) {
case 0:
src_stride[0] = src_ref.stride()[1];
src_stride[1] = src_ref.stride()[0];
src_stride[2] = src_ref.stride()[2];
dst_stride[0] = dst_ref.stride()[1];
dst_stride[1] = dst_ref.stride()[0];
break;
case 1:
src_stride[0] = src_ref.stride()[2];
src_stride[1] = src_ref.stride()[0];
src_stride[2] = src_ref.stride()[1];
dst_stride[0] = dst_ref.stride()[2];
dst_stride[1] = dst_ref.stride()[0];
break;
case 2:
src_stride[0] = src_ref.stride()[2];
src_stride[1] = src_ref.stride()[1];
src_stride[2] = src_ref.stride()[0];
dst_stride[0] = dst_ref.stride()[2];
dst_stride[1] = dst_ref.stride()[1];
break;
case 3:
src_stride[0] = src_ref.stride()[2];
src_stride[1] = src_ref.stride()[1];
src_stride[2] = src_ref.stride()[0];
dst_stride[0] = dst_ref.stride()[2];
dst_stride[1] = dst_ref.stride()[1];
        dst_stride[2] = dst_ref.stride()[0];
        break;
      default: break;
}
if (reduction_index == 3) {
return reduction_contiguous(
dst_ref.data(),
dst_stride,
src_ref.data(),
src_stride,
device_workspace_ptr,
reduction_identity,
reduction_op,
stream);
}
else {
return reduction_strided(
dst_ref.data(),
dst_stride,
src_ref.data(),
src_stride,
device_workspace_ptr,
reduction_identity,
reduction_op,
stream);
}
}
Status operator()(
TensorRef<ElementOutput, Layout> dst_ref,
TensorRef<ElementSource, Layout> src_ref,
void *device_workspace_ptr = nullptr,
ElementCompute reduction_identity = ElementCompute(),
ReductionOp reduction_op = ReductionOp(),
cudaStream_t stream = nullptr) {
return reduce(
dst_ref,
src_ref,
device_workspace_ptr,
reduction_identity,
reduction_op,
stream);
}
};
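// Minimal usage sketch (illustrative; layout, element types and identity value are
// example choices): summing over the H index (reduction_index == 1) of an NHWC tensor.
//
//   using TensorReduction = cutlass::reduction::device::TensorReduction<
//       float, float, cutlass::layout::TensorNHWC, cutlass::plus<float>>;
//
//   TensorReduction reduction(extent, /*reduction_index=*/1);  // extent: {N, H, W, C}
//   /* ... allocate reduction.workspace_size() bytes on the device if non-zero ... */
//   cutlass::Status status = reduction.reduce(
//       dst_ref,            // TensorRef for the reduced output
//       src_ref,            // TensorRef for the source tensor
//       workspace_ptr,
//       /*reduction_identity=*/0.0f);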
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,152 | C | 29.766038 | 100 | 0.61629 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/thread/reduction_operators.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
/// Parameters structure
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineStridedParams {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
Coord<kRank> extent; /// Extent of source tensor
FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank
int64_t dst_stride[kReducedRank - 1]; /// stride (units of bytes) - I, J
int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K
int64_t workspace_stride; /// stride (units of bytes) between workspace
int64_t workspace_outer_stride; /// stride (units of bytes) between 'rows' of the workspace
int workspace_count; /// number of workspaces
uint64_t inner_count; /// Number of elements in reduced index space
uint64_t outer_count; /// Number of elements in outer index space
ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank
  ElementSource const * source; /// Pointer to source tensor of rank kRank
ReductionOp reduction_op; /// Reduction operator
ElementCompute reduction_identity; /// Identity element for reduction operator
ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorReductionAffineStridedParams() {
}
/// Ctor
TensorReductionAffineStridedParams(
Coord<kRank> extent_, ///< Extent of source tensor
ElementOutput * dst_ptr_, ///< Output tensor data
int64_t dst_stride_[], ///< Stride (units of elements)
ElementSource const * src_ptr_, ///< Source tensor data
int64_t src_stride_[], ///< Stride (units of elements)
ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions
int64_t workspace_stride_, ///< Stride between workspaces
int workspace_count_, ///< Number of workspaces
ReductionOp reduction_op_, ///< Reduction operator
ElementCompute reduction_identity_ = ElementCompute() ///< Identity element for reduction operator
):
extent(extent_),
inner_count(1),
outer_count(1),
destination(dst_ptr_),
source(src_ptr_),
device_workspace(device_workspace_),
workspace_outer_stride(0),
workspace_stride(workspace_stride_),
workspace_count(workspace_count_),
reduction_op(reduction_op_),
reduction_identity(reduction_identity_) {
// Initialize divisors for fast div-mod
for (int p = 1; p < kRank; ++p) {
divmod[p - 1] = FastDivmodU64(uint64_t(extent[p]));
}
int input_size_bits = sizeof_bits<ElementSource>::value;
int output_size_bits = sizeof_bits<ElementOutput>::value;
workspace_outer_stride = workspace_stride * workspace_count;
// Compute strides in units of bytes
for (int p = 0; p < kReducedRank - 1; ++p) {
dst_stride[p] = dst_stride_[p] * output_size_bits / 8;
}
for (int p = 0; p < kRank - 1; ++p) {
src_stride[p] = src_stride_[p] * input_size_bits / 8;
}
// Compute number of elements in strided ranks
for (int p = 0; p < kReducedRank - 1; ++p) {
outer_count *= uint64_t(extent[p]);
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= uint64_t(extent[kReducedRank + p - 1]);
}
}
};
/// Kernel to reduce a tensor with affine layout over a set of ranks *EXCLUDING* the contiguous
/// rank. This leads to favorable vectorized memory accesses over the contiguous rank.
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineStrided {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory allocation used for reduction within the CTA
struct SharedStorage {
Array<ElementCompute, kThreads * kVectorLength> workspace;
};
/// Parameters structure
using Params = TensorReductionAffineStridedParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_inner_coord_and_offset_(
Params const ¶ms,
Coord<kInnerRank> & coord,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into coordinate
coord = CoordinateDecomposition<kInnerRank>(linear_idx, ¶ms.divmod[kReducedRank - 1]);
// Compute linear offset
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kInnerRank; ++i) {
src_offset += params.src_stride[kReducedRank + i - 1] * coord[i];
}
}
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank - 1> & coord,
int64_t &dst_offset,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose linear coordinate
coord = CoordinateDecomposition<kReducedRank - 1>(linear_idx, params.divmod);
// Compute offset into tensors
dst_offset = 0;
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank - 1; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
src_offset += params.src_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ComputeFragment reduce_indices_(
Params const ¶ms,
ElementCompute *threadblock_workspace,
char const *src_byte_ptr) {
NumericArrayConverter<ElementCompute, ElementSource, VectorLength> convert_source;
ReductionOp reduction_op(params.reduction_op);
// Accumulated output
ComputeFragment identity_frag;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < identity_frag.size(); ++i) {
identity_frag[i] = params.reduction_identity;
}
if (!params.inner_count) {
return identity_frag;
}
ComputeFragment accumulator = identity_frag;
// Compute the coordinate of the first access
int64_t src_byte_offset = 0;
Coord<kInnerRank> coord;
uint64_t linear_idx = threadIdx.z + blockIdx.z * blockDim.z;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
// Load the first vector
SourceFragment source_fragment[kBatchSize];
bool not_done = true;
// Iterate over vectors in a linearized reduction index space
while (not_done) {
bool guards[kBatchSize];
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (linear_idx < params.inner_count) {
source_fragment[b] = *reinterpret_cast<SourceFragment const *>(src_byte_ptr + src_byte_offset);
guards[b] = true;
}
else {
guards[b] = false;
not_done = false;
}
linear_idx += blockDim.z * gridDim.z;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
}
// Perform a batch of reduction operations
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (guards[b]) {
auto cvt = convert_source(source_fragment[b]);
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
cvt);
}
}
    }
// Optional reduction within a CTA
if (blockDim.z > 1) {
// Linearized thread ID
int thread_idx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
// all threads store to workspace
ComputeFragment *frag_ptr = reinterpret_cast<ComputeFragment *>(threadblock_workspace);
frag_ptr[thread_idx] = accumulator;
__syncthreads();
if (threadIdx.z == 0) {
// Load all additional block indices
for (int z = 1; z < blockDim.z; ++z) {
ComputeFragment frag = frag_ptr[thread_idx + z * blockDim.x * blockDim.y];
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
frag);
}
}
__syncthreads();
}
return accumulator;
}
public:
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char const * src_byte_ptr = reinterpret_cast<char const *>(params.source + coord_c);
char * dst_byte_ptr = nullptr;
// If performing a reduction across CTAs, redirect output to device workspace
if (gridDim.z == 1) {
dst_byte_ptr = reinterpret_cast<char *>(params.destination + coord_c);
}
else {
dst_byte_ptr = reinterpret_cast<char *>(params.device_workspace + coord_c);
}
// If the C index is out of bounds, exit
if (coord_c >= params.extent[kRank - 1]) {
return;
}
int64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank - 1> outer_coord;
int64_t dst_byte_offset;
int64_t src_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
if (gridDim.z == 1) {
/// Complete the reduction with no workspace
while (idx_linear < params.outer_count) {
ComputeFragment result;
result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0) {
// Convert to output type and store
NumericArrayConverter<ElementOutput, ElementCompute, VectorLength> convert_output;
auto cvt = convert_output(result);
*reinterpret_cast<OutputFragment *>(dst_byte_ptr + dst_byte_offset) =
reinterpret_cast<OutputFragment const &>(cvt);
}
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
else {
/// Complete the reduction with a device workspace
while (idx_linear < params.outer_count) {
ComputeFragment result;
result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0) {
int64_t byte_offset =
blockIdx.z * params.workspace_stride + idx_linear * params.workspace_outer_stride;
// No conversion - store in compute type
*reinterpret_cast<ComputeFragment *>(dst_byte_ptr + byte_offset) =
reinterpret_cast<ComputeFragment const &>(result);
}
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while (outer index)
} // if ()
}
};
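//
// Note: when gridDim.z > 1, the kernel above stores one ComputeFragment partial per blockIdx.z
// into params.device_workspace without converting to ElementOutput. The final-reduction kernel
// below combines params.workspace_count such partials per output element and performs the
// conversion to ElementOutput.
//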
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to perform final reduction
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineStridedFinal {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory
struct SharedStorage { };
/// Parameters structure
using Params = TensorReductionAffineStridedParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank - 1> & coord,
int64_t &dst_offset,
uint64_t linear_idx) const {
// Decompose linear index
coord = CoordinateDecomposition<kReducedRank - 1>(linear_idx, params.divmod);
// Compute tensor offset
dst_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank - 1; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ComputeFragment reduce_indices_(
Params const ¶ms,
char *src_byte_ptr) {
ReductionOp reduction_op(params.reduction_op);
// Accumulated output
ComputeFragment identity_frag;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < identity_frag.size(); ++i) {
identity_frag[i] = params.reduction_identity;
}
ComputeFragment accumulator = identity_frag;
ComputeFragment workspace_fragments[kBatchSize];
// Partially unrolled loop
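    // Out-of-range batch slots are filled with the identity element so the unrolled reduction
    // below needs no per-element guards.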
for (int idx = 0; idx < params.workspace_count; idx += kBatchSize) {
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (idx + b < params.workspace_count) {
workspace_fragments[b] =
*reinterpret_cast<ComputeFragment *>(src_byte_ptr);
}
else {
workspace_fragments[b] = identity_frag;
}
        src_byte_ptr += params.workspace_stride;
}
// Perform a reduction
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorLength; ++i) {
accumulator[i] = reduction_op(accumulator[i], workspace_fragments[b][i]);
}
}
}
return accumulator;
}
public:
//
// Methods
//
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char * src_byte_ptr = reinterpret_cast<char *>(params.device_workspace + coord_c);
char * dst_byte_ptr = reinterpret_cast<char *>(params.destination + coord_c);
// If the C index is out of bounds, exit
if (coord_c >= params.extent[kRank - 1]) {
return;
}
int64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank - 1> outer_coord;
int64_t dst_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
/// Complete the reduction
while (idx_linear < params.outer_count) {
int64_t src_byte_offset = idx_linear * params.workspace_outer_stride;
ComputeFragment result = reduce_indices_(
params,
src_byte_ptr + src_byte_offset);
// Convert to output type and store
NumericArrayConverter<ElementOutput, ElementCompute, VectorLength> convert_output;
auto cvt = convert_output(result);
*reinterpret_cast<OutputFragment *>(dst_byte_ptr + dst_byte_offset) =
reinterpret_cast<OutputFragment const &>(cvt);
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 21,662 | C | 32.742991 | 109 | 0.608808 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/thread/reduction_operators.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (i.e. number of outer ranks)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineContiguousParams {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
Coord<kRank> extent; /// Extent of source tensor
FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank
int64_t dst_stride[kReducedRank]; /// stride (units of bytes) - I, J
int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K
  int64_t workspace_stride;              /// stride (units of bytes) between workspace partitions
int workspace_count; /// number of workspaces
uint64_t inner_count; /// Number of elements in reduced index space
uint64_t outer_count; /// Number of elements in outer index space
ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank
  ElementSource const * source;          /// Pointer to source tensor of rank kRank
ReductionOp reduction_op; /// Reduction operator
ElementCompute reduction_identity; /// Identity element used by reduction operator
ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorReductionAffineContiguousParams() {
}
/// Ctor
TensorReductionAffineContiguousParams(
Coord<kRank> extent_, ///< Extent of source tensor
ElementOutput * dst_ptr_, ///< Output tensor data
int64_t dst_stride_[], ///< Stride (units of elements)
ElementSource const * src_ptr_, ///< Source tensor data
int64_t src_stride_[], ///< Stride (units of elements)
ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions
int64_t workspace_stride_, ///< Stride between workspaces
int workspace_count_, ///< Number of workspaces
ReductionOp reduction_op_, ///< Reduction operator
ElementCompute reduction_identity_ = ElementCompute() ///< Identity element used by reduction operator
):
extent(extent_),
inner_count(1),
outer_count(1),
destination(dst_ptr_),
source(src_ptr_),
device_workspace(device_workspace_),
workspace_stride(workspace_stride_),
workspace_count(workspace_count_),
reduction_op(reduction_op_),
reduction_identity(reduction_identity_) {
// Initialize divisors for fast div-mod
for (int p = 1; p < kRank; ++p) {
divmod[p - 1] = FastDivmodU64(uint64_t(extent[p]));
}
int input_size_bits = sizeof_bits<ElementSource>::value;
int output_size_bits = sizeof_bits<ElementOutput>::value;
// Compute strides in units of bytes
for (int p = 0; p < kReducedRank; ++p) {
dst_stride[p] = dst_stride_[p] * output_size_bits / 8;
}
for (int p = 0; p < kRank - 1; ++p) {
src_stride[p] = src_stride_[p] * input_size_bits / 8;
}
    // Compute the number of elements in the outer and inner index spaces
for (int p = 0; p < kReducedRank; ++p) {
outer_count *= uint64_t(extent[p]);
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= uint64_t(extent[kRank - 1 - p]);
}
}
};
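//
// Illustration (hypothetical rank-4 NHWC-like tensor with ReducedRank = 1): the inner (reduced)
// index space covers the trailing kInnerRank = 3 ranks H, W and C, so the constructor above
// computes outer_count = N and inner_count = H * W * C, and converts the dst_stride (1 entry)
// and src_stride (3 entries) arrays from element units to bytes.
//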
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to reduce a tensor with affine layout over a set of ranks *INCLUDING* the contiguous
/// rank. This leads to favorable vectorized memory accesses over the contiguous rank.
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
  int ReducedRank,                              ///< Rank of reduced tensor (outer ranks only; the contiguous rank is reduced, e.g. ND => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineContiguous {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory allocation used for reduction within the CTA
struct SharedStorage {
Array<ElementCompute, kThreads * kVectorLength> workspace;
};
/// Parameters structure
using Params = TensorReductionAffineContiguousParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_inner_coord_and_offset_(
Params const ¶ms,
Coord<kInnerRank> & coord,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into a coordinate of rank <kInnerRank>
coord = CoordinateDecomposition<kInnerRank>(linear_idx, ¶ms.divmod[kRank - kInnerRank]);
    // Compute an offset using the source stride
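    // Illustration (hypothetical Rank = 4, ReducedRank = 1, inner ranks H, W, C): the byte
    // offset computed below is h * src_stride[1] + w * src_stride[2] + c * sizeof(ElementSource),
    // i.e. only the leading inner coordinates use the precomputed byte strides, while the
    // contiguous coordinate is scaled by the element size directly.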
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kInnerRank - 1; ++i) {
src_offset += coord[i] * params.src_stride[kReducedRank + i];
}
src_offset += coord[kInnerRank - 1] * sizeof_bits<ElementSource>::value / 8;
}
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank> & coord,
int64_t &dst_offset,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into coordinate of rank <kReducedRank>
coord = CoordinateDecomposition<kReducedRank>(linear_idx, params.divmod);
// Compute offsets using destination and source strides
dst_offset = 0;
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
src_offset += params.src_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices yielding a single element
CUTLASS_DEVICE
ElementCompute reduce_indices_(
Params const ¶ms,
ElementCompute *threadblock_workspace,
char const *src_byte_ptr,
int coord_c) {
NumericArrayConverter<ElementCompute, ElementSource, VectorLength> convert_source;
ReductionOp reduction_op(params.reduction_op);
//
// Early exit or initialize to identity element
//
if (!params.inner_count) {
return params.reduction_identity;
}
ComputeFragment accumulator;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < accumulator.size(); ++i) {
accumulator[i] = params.reduction_identity;
}
// Compute the coordinate of the first access
int64_t src_byte_offset = 0;
Coord<kInnerRank> coord;
uint64_t linear_idx = (threadIdx.x + blockDim.x * threadIdx.z + blockDim.x * blockIdx.z * blockDim.z) * kVectorLength;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
// Load the first vector
SourceFragment source_fragment[kBatchSize];
bool not_done = true;
// Iterate over vectors in a linearized reduction index space
while (not_done) {
bool guards[kBatchSize];
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (linear_idx < params.inner_count) {
source_fragment[b] = *reinterpret_cast<SourceFragment const *>(src_byte_ptr + src_byte_offset);
guards[b] = true;
}
else {
guards[b] = false;
not_done = false;
}
linear_idx += (blockDim.z * gridDim.z * blockDim.x) * kVectorLength;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
}
// Perform a batch of reduction operations
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (guards[b]) {
auto cvt = convert_source(source_fragment[b]);
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
cvt);
}
}
    }
//
// Reduction of vectors to scalar
//
ElementCompute reduced_accumulator = accumulator[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kVectorLength; ++i) {
reduced_accumulator = reduction_op(reduced_accumulator, accumulator[i]);
}
//
// Reduction within CTA across threadIdx.xz => threadIdx{.x = 0, .z = 0}
//
// This re-arranges data so threadIdx.y is effectively a row index and threadIdx.xz is a column
//
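    // Illustration: threads sharing threadIdx.y hold partial scalars for the same output element
    // and form one row of the shared-memory workspace. With a hypothetical blockDim of (8, 4, 2),
    // thread_count starts at 16 and the loop below halves it four times, leaving the fully
    // reduced value with the thread having threadIdx.x == 0 and threadIdx.z == 0.
    //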
int thread_count = blockDim.x * blockDim.z;
int thread_j = threadIdx.x + blockDim.x * threadIdx.z;
int thread_i = threadIdx.y;
ElementCompute *frag_ptr = reinterpret_cast<ElementCompute *>(threadblock_workspace) + thread_i * thread_count;
frag_ptr[thread_j] = reduced_accumulator;
//
// Reduce
//
CUTLASS_PRAGMA_NO_UNROLL
while (thread_count > 1) {
thread_count /= 2;
__syncthreads();
if (thread_j < thread_count) {
ElementCompute other = frag_ptr[thread_j + thread_count];
reduced_accumulator = reduction_op(reduced_accumulator, other);
frag_ptr[thread_j] = reduced_accumulator;
}
__syncthreads();
}
return reduced_accumulator;
}
public:
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char const * src_byte_ptr = reinterpret_cast<char const *>(params.source);
char * dst_byte_ptr = nullptr;
// If performing a reduction across CTAs, redirect output to device workspace
if (gridDim.z == 1) {
dst_byte_ptr = reinterpret_cast<char *>(params.destination);
}
else {
dst_byte_ptr = reinterpret_cast<char *>(params.device_workspace);
}
uint64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank> outer_coord;
int64_t dst_byte_offset;
int64_t src_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
if (gridDim.z == 1) {
/// Complete the reduction with no workspace
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset,
coord_c);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0 && threadIdx.x == 0) {
// Convert to output type and store
NumericConverter<ElementOutput, ElementCompute> convert_output;
ElementOutput cvt = convert_output(result);
*reinterpret_cast<ElementOutput *>(dst_byte_ptr + dst_byte_offset) = cvt;
}
__syncthreads();
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
else {
/// Complete the reduction with workspace
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset,
coord_c);
int64_t byte_offset =
blockIdx.z * params.workspace_stride + idx_linear * sizeof_bits<ElementCompute>::value / 8;
// Store the result for final reduction
if (threadIdx.z == 0 && threadIdx.x == 0) {
*reinterpret_cast<ElementCompute *>(dst_byte_ptr + byte_offset) = result;
}
__syncthreads();
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to perform final reduction
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
  int ReducedRank,                              ///< Rank of reduced tensor (outer ranks only; the contiguous rank is reduced, e.g. ND => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineContiguousFinal {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
/// Shared memory
struct SharedStorage { };
/// Parameters structure
using Params = TensorReductionAffineContiguousParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank> & coord,
int64_t &dst_offset,
uint64_t linear_idx) const {
// Decompose into coordinate of rank <kReducedRank>
coord = CoordinateDecomposition<kReducedRank>(linear_idx, params.divmod);
// Compute offsets using destination and source strides
dst_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ElementCompute reduce_indices_(
Params const ¶ms,
ElementCompute const *device_workspace) {
ReductionOp reduction_op(params.reduction_op);
char const *src_byte_ptr = reinterpret_cast<char const *>(device_workspace);
// Accumulated output
ElementCompute accumulator = params.reduction_identity;
for (int iter = 0; iter < params.workspace_count; ++iter) {
ElementCompute workspace_item = *reinterpret_cast<ElementCompute const *>(src_byte_ptr);
accumulator = reduction_op(accumulator, workspace_item);
src_byte_ptr += params.workspace_stride;
}
return accumulator;
}
public:
//
// Methods
//
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
uint64_t idx_linear = blockIdx.x * blockDim.x + threadIdx.x;
char * dst_byte_ptr = reinterpret_cast<char *>(params.destination);
// Use modulo division to compute location
Coord<kReducedRank> outer_coord;
int64_t dst_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
/// Complete the reduction
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(params, params.device_workspace + idx_linear);
// Convert to output type and store
NumericConverter<ElementOutput, ElementCompute> convert_output;
*reinterpret_cast<ElementOutput *>(dst_byte_ptr + dst_byte_offset) = convert_output(result);
// Update indices and pointers
idx_linear += gridDim.x * blockDim.x;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
}
}
};
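//
// Usage sketch (illustrative comment only; not part of the library). A host-side wrapper is
// expected to launch the partial kernel and then the final kernel when the inner index space is
// split across gridDim.z. A minimal sketch, assuming the generic cutlass::Kernel<> entry point
// from cutlass/device_kernel.h and a caller-allocated ElementCompute workspace; grid and block
// shapes are placeholders chosen by the device-layer wrappers elsewhere in CUTLASS:
//
//   using Partial = cutlass::reduction::kernel::TensorReductionAffineContiguous<
//     4, 1, float, float, cutlass::plus<float>, 4>;
//   using Final = cutlass::reduction::kernel::TensorReductionAffineContiguousFinal<
//     4, 1, float, float, cutlass::plus<float>, 4>;
//
//   typename Partial::Params params(/* extent, pointers, strides, workspace, reduction op */);
//
//   cutlass::Kernel<Partial><<<grid, block, sizeof(typename Partial::SharedStorage)>>>(params);
//   cutlass::Kernel<Final><<<grid_final, block_final>>>(params);   // both kernels share Params
//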
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 20,685 | C | 33.079077 | 122 | 0.611796 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/kernel/reduce_split_k.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over densely packed tensors in global memory
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< shape of CTA (concept: MatrixShape)
typename OutputOp_ , ///< output operator (concept: epilogue::thread operator)
typename ReductionOp_, ///< reduction operator (concept: ReductionOperator)
  int PartitionsPerStage = 4    ///< number of partitions to load and reduce per stage
>
class ReduceSplitK {
public:
using Shape = Shape_;
using ReductionOp = ReductionOp_;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = OutputOp::kCount;
static int const kPartitionsPerStage = PartitionsPerStage;
using ElementWorkspace = typename ReductionOp::Element;
using ElementAccumulator = typename ReductionOp::ElementAccumulator;
using ElementOutput = typename OutputOp::ElementOutput;
using WorkspaceTensorRef = TensorRef<ElementWorkspace, layout::RowMajor>;
using OutputTensorRef = TensorRef<ElementOutput, layout::RowMajor>;
using StrideIndex = typename WorkspaceTensorRef::Layout::Stride::Index;
using FragmentWorkspace = AlignedArray<ElementWorkspace, kElementsPerAccess>;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentOutput = AlignedArray<ElementOutput, kElementsPerAccess>;
//
// Types
//
/// Params structure
struct Params {
MatrixCoord problem_size;
int partitions;
size_t partition_stride;
WorkspaceTensorRef workspace;
OutputTensorRef destination;
OutputTensorRef source;
typename OutputOp::Params output;
typename ReductionOp::Params reduction;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
MatrixCoord problem_size_,
int partitions_,
size_t partition_stride_,
WorkspaceTensorRef workspace_,
OutputTensorRef destination_,
OutputTensorRef source_,
typename OutputOp::Params output_ = typename OutputOp::Params(),
typename ReductionOp::Params reduction_ = typename ReductionOp::Params()
):
problem_size(problem_size_),
partitions(partitions_),
partition_stride(sizeof(FragmentWorkspace) * partition_stride_ / kElementsPerAccess),
workspace(workspace_),
destination(destination_),
source(source_),
output(output_),
reduction(reduction_) {
}
};
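  // Note: partition_stride_ is passed in units of elements; the Params constructor above stores
  // it as a byte stride between split-K partitions (sizeof(FragmentWorkspace) bytes per vector
  // of kElementsPerAccess elements).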
struct SharedStorage { };
public:
/// Computes the grid size given a chosen threadblock shape
CUTLASS_HOST_DEVICE
static dim3 grid_shape(
cutlass::MatrixCoord problem_size) {
return dim3(
(problem_size.row() + Shape::kRow - 1) / Shape::kRow,
(problem_size.column() + Shape::kColumn - 1) / Shape::kColumn);
}
/// Determines the threadblock shape
CUTLASS_HOST_DEVICE
static dim3 block_shape() {
return dim3(Shape::kColumn / kElementsPerAccess, Shape::kRow);
}
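  // Illustration (hypothetical shapes): with Shape = cutlass::MatrixShape<4, 128> and an output
  // operator whose kCount is 4, block_shape() returns (32, 4) threads and each CTA reduces a
  // 4-row by 128-column tile, every thread handling one 4-element vector per partition.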
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &storage) {
// Determine CTA position
MatrixCoord thread_offset(
MatrixCoord::Index(int(blockIdx.x) * Shape::kRow + threadIdx.y),
MatrixCoord::Index(int(blockIdx.y) * Shape::kColumn + threadIdx.x * kElementsPerAccess)
);
    // A single guard covers both the row and column bounds
if (!(thread_offset.row() < params.problem_size.row() &&
thread_offset.column() < params.problem_size.column())) {
return;
}
ReductionOp reduction_op(params.reduction);
FragmentAccumulator accumulator;
accumulator.clear();
//
// Load the first slice
//
char const *workspace_ptr =
reinterpret_cast<char const *>(
params.workspace.data() + params.workspace.offset(thread_offset));
FragmentWorkspace workspace_frag[kPartitionsPerStage];
//
// Construct the output operator
//
OutputOp output_op(params.output);
//
// Load and accumulate with a simple batched loading sequence.
//
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < params.partitions; k += kPartitionsPerStage) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPartitionsPerStage; ++i) {
if (k + i < params.partitions) {
workspace_frag[i] = *reinterpret_cast<FragmentWorkspace const *>(workspace_ptr);
workspace_ptr += params.partition_stride;
}
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPartitionsPerStage; ++i) {
if (k + i < params.partitions) {
accumulator = reduction_op(accumulator, workspace_frag[i]);
}
}
}
//
// Conditionally load the source
//
FragmentOutput source_frag;
source_frag.clear();
FragmentOutput const *source_ptr = reinterpret_cast<FragmentOutput const *>(
params.source.data() + params.source.offset(thread_offset));
if (output_op.is_source_needed()) {
reinterpret_cast<FragmentOutput &>(source_frag) = *source_ptr;
}
//
// Compute the output
//
typename OutputOp::FragmentOutput output_frag = output_op(accumulator, source_frag);
//
// Store
//
FragmentOutput *dest_ptr = reinterpret_cast<FragmentOutput *>(
params.destination.data() + params.destination.offset(thread_offset));
*dest_ptr = reinterpret_cast<FragmentOutput const &>(output_frag);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
| 7,897 | C | 30.718875 | 100 | 0.651387 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/kernel/reduce_softmax_final.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a final reduction for softmax
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
template <
typename ElementNorm_,
typename ElementSum_,
typename ElementSoftmaxCompute_,
typename ThreadblockShape_,
bool GroupedProblem = false
>
class ApplySoftmaxFinalReduction {
public:
using ElementNorm = ElementNorm_;
using ElementSum = ElementSum_;
using ElementSoftmaxCompute = ElementSoftmaxCompute_;
using ThreadblockShape = ThreadblockShape_;
static const bool isGroupedProblem = GroupedProblem;
//
// Arguments
//
struct Arguments {
cutlass::gemm::GemmCoord* problem_sizes;
cutlass::gemm::GemmCoord problem_size;
ElementNorm* block_Norm;
ElementSum* block_Sum;
int64_t* offset_Norm_Device;
int64_t* offset_Sum_Device;
int64_t batch_stride_Max;
int64_t batch_stride_Sum;
//
// Methods
//
Arguments() { }
// Non-grouped constructor without batching
Arguments(
cutlass::gemm::GemmCoord problem_size,
ElementNorm* block_Norm,
ElementSum* block_Sum
):
problem_size(problem_size),
block_Norm(block_Norm),
block_Sum(block_Sum),
problem_sizes(nullptr),
offset_Norm_Device(nullptr),
offset_Sum_Device(nullptr),
batch_stride_Max(0),
batch_stride_Sum(0)
{
}
// Non-grouped constructor with batching
Arguments(
cutlass::gemm::GemmCoord problem_size,
ElementNorm* block_Norm,
ElementSum* block_Sum,
int64_t batch_stride_Max,
int64_t batch_stride_Sum
):
problem_size(problem_size),
block_Norm(block_Norm),
block_Sum(block_Sum),
batch_stride_Max(batch_stride_Max),
batch_stride_Sum(batch_stride_Sum),
problem_sizes(nullptr),
offset_Norm_Device(nullptr),
offset_Sum_Device(nullptr)
{
}
// Grouped constructor
Arguments(
cutlass::gemm::GemmCoord *problem_sizes,
ElementNorm* block_Norm,
ElementSum* block_Sum,
int64_t* offset_Norm_Device,
int64_t* offset_Sum_Device
):
problem_sizes(problem_sizes),
problem_size(cutlass::gemm::GemmCoord(0, 0, 0)),
block_Norm(block_Norm),
block_Sum(block_Sum),
offset_Norm_Device(offset_Norm_Device),
offset_Sum_Device(offset_Sum_Device)
{
}
};
struct SharedStorage {
};
//
// Params struct
//
struct Params {
Arguments args;
//
// Methods
//
Params() { }
Params(Arguments const &args_): args(args_) { }
};
private:
public:
CUTLASS_DEVICE
ApplySoftmaxFinalReduction() { }
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
apply(params, shared_storage);
}
private:
/// Full reduction
CUTLASS_DEVICE
void apply(Params const ¶ms, SharedStorage &shared_storage) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int bdim = blockDim.x;
int block_batch = blockIdx.z;
    // Set up the problem size, loop bound, and access offset for grouped and non-grouped problems
cutlass::gemm::GemmCoord problem_size = isGroupedProblem ? params.args.problem_sizes[bid] : params.args.problem_size;
int m_dim_in_loop = isGroupedProblem ? problem_size.m() : tid + bdim;
int access_offset = isGroupedProblem ? 0 : bid * bdim;
if (!isGroupedProblem && access_offset + tid >= problem_size.m()) return;
ElementNorm *curr_ptr_Max = isGroupedProblem ? \
params.args.block_Norm + params.args.offset_Norm_Device[bid] : \
params.args.block_Norm + block_batch * params.args.batch_stride_Max;
ElementSum *curr_ptr_Sum = isGroupedProblem ? \
params.args.block_Sum + params.args.offset_Sum_Device[bid] : \
params.args.block_Sum + block_batch * params.args.batch_stride_Sum;
int threadblock_num = (problem_size.n() + ThreadblockShape::kN - 1) / ThreadblockShape::kN;
using ConvertSumOutput = cutlass::NumericConverter<ElementSum, ElementSoftmaxCompute>;
using ConvertNormOutput = cutlass::NumericConverter<ElementNorm, ElementSoftmaxCompute>;
using ConvertSum = cutlass::NumericConverter<ElementSoftmaxCompute, ElementSum>;
using ConvertNorm = cutlass::NumericConverter<ElementSoftmaxCompute, ElementNorm>;
ConvertSum convert_sum;
ConvertNorm convert_norm;
ConvertSumOutput convert_sum_output;
ConvertNormOutput convert_norm_output;
uint32_t float_max_bits = 0xff7fffff;
float min_float = reinterpret_cast<float const &>(float_max_bits);
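    // The loop below makes two passes over the per-threadblock partials of each row owned by
    // this thread: the first pass computes the row maximum N = max_j N_j, the second accumulates
    // S = sum_j S_j * exp(N_j - N). The row's first entries are then overwritten in place with
    // N and 1 / S. The bit pattern 0xff7fffff above is -FLT_MAX, the identity for the max
    // reduction.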
CUTLASS_PRAGMA_UNROLL
for (int idx_m = tid; idx_m < m_dim_in_loop; idx_m += bdim) {
ElementNorm *access_n = curr_ptr_Max + idx_m + access_offset;
ElementSum *access_s = curr_ptr_Sum + idx_m + access_offset;
ElementNorm *access_n_bak = access_n;
ElementSum *access_s_bak = access_s;
ElementSoftmaxCompute max_val = ElementSoftmaxCompute(min_float);
ElementSoftmaxCompute sum_val = ElementSoftmaxCompute(0);
ElementNorm fetch_n;
ElementSum fetch_s;
CUTLASS_PRAGMA_UNROLL
for (int idx_n = 0; idx_n < threadblock_num; idx_n++) {
cutlass::arch::global_load<ElementNorm, sizeof(ElementNorm)>(fetch_n, access_n, true);
max_val = cutlass::fast_max(max_val, convert_norm(fetch_n));
access_n += problem_size.m();
}
access_n = access_n_bak;
CUTLASS_PRAGMA_UNROLL
for (int idx_n = 0; idx_n < threadblock_num; idx_n++) {
cutlass::arch::global_load<ElementNorm, sizeof(ElementNorm)>(fetch_n, access_n, true);
cutlass::arch::global_load<ElementSum, sizeof(ElementSum)>(fetch_s, access_s, true);
sum_val += convert_sum(fetch_s) * cutlass::fast_exp(convert_norm(fetch_n) - max_val);
access_n += problem_size.m();
access_s += problem_size.m();
}
ElementSoftmaxCompute inv_sum = cutlass::constants::one<ElementSoftmaxCompute>() / sum_val;
access_n = access_n_bak;
access_s = access_s_bak;
access_n[0] = convert_norm_output(max_val);
access_s[0] = convert_sum_output(inv_sum);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
| 8,762 | C | 31.697761 | 121 | 0.632618 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines layouts for Volta Tensor Core multiplicand operands, expressed in terms of pitch-linear memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/layout/pitch_linear.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace layout {
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct VoltaTensorOpMultiplicandCongruous;
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct ColumnMajorVoltaTensorOpMultiplicandCongruous;
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct RowMajorVoltaTensorOpMultiplicandCongruous;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear memory.
template <int ElementSize>
struct VoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
/// Fundamental tile shape in units of vectors
using TileShape = PitchLinearShape<8, 4>;
/// Fundamental partition shape in units of vectors
using PartitionShape = PitchLinearShape<8, 2>;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
using PartitionCount = PitchLinearShape<
TileShape::kContiguous / PartitionShape::kContiguous,
TileShape::kStrided / PartitionShape::kStrided
>;
using AccessCount = PitchLinearShape<
PartitionShape::kContiguous,
PartitionShape::kStrided
>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCongruous(Index ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCongruous(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandCongruous(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
// First, compute c and s of vector within source (in units of vector accesses)
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
// Compute the fundamental tile being accessed
int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous;
int tile_strided_idx = vec_strided_idx / TileShape::kStrided;
int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous;
int tile_strided_residual = vec_strided_idx % TileShape::kStrided;
// Then swizzle in a tile
// Swizzle pattern is (tid[2:0] << 2)|(tid[4:3] ^ tid[2:1])
int permuted_strided_within_tile = (tile_contiguous_residual >> 1);
int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) |
((tile_contiguous_residual & 1) << 2);
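    // Worked example: the vector at (contiguous, strided) = (3, 1) within a tile gives
    // permuted_strided_within_tile = 3 >> 1 = 1 and
    // permuted_contiguous_within_tile = (1 ^ 1) | ((3 & 1) << 2) = 4.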
// Compute final element location
int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous +
permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess);
int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile;
return element_contiguous + element_strided * stride_[0];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous
template <int ElementSize>
struct ColumnMajorVoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandCongruous(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
/// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous
template <int ElementSize>
struct RowMajorVoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandCongruous(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
/// Template based on element size (in bits) - defined in terms of pitch-linear memory.
// template <int ElementSize, Operand Operand>
template <int ElementSize>
struct VoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
/// Fundamental tile shape in units of vectors
using TileShape = PitchLinearShape<8, 4>;
/// Fundamental partition shape in units of vectors
using PartitionShape = PitchLinearShape<4, 4>;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
using PartitionCount = PitchLinearShape<
TileShape::kContiguous / PartitionShape::kContiguous,
TileShape::kStrided / PartitionShape::kStrided
>;
using AccessCount = PitchLinearShape<
PartitionShape::kContiguous,
PartitionShape::kStrided
>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandBCongruous(Index ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandBCongruous(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandBCongruous(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
// First, compute c and s of vector within source (in units of vector accesses)
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
// Compute the fundamental tile being accessed
int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous;
int tile_strided_idx = vec_strided_idx / TileShape::kStrided;
int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous;
int tile_strided_residual = vec_strided_idx % TileShape::kStrided;
// Then swizzle in a tile
// Swizzle pattern is (tid[1:0] << 3)|(tid & 0x4)|(tid[1:0])
int permuted_strided_within_tile = (tile_contiguous_residual & 0x3);
int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) |
(tile_contiguous_residual & 0x4);
// Compute final element location
int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous +
permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess);
int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile;
return element_contiguous + element_strided * stride_[0];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous
template <int ElementSize>
struct ColumnMajorVoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandBCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandBCongruous(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
/// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous
template <int ElementSize>
struct RowMajorVoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandBCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandBCongruous(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
/// Template based on element size (in bits) - defined in terms of pitch-linear
/// memory and KBlock size (in elements).
template <int ElementSize, int KBlock>
struct VoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 64b accesses
static int const kAccessSize = 64;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
static int const kKBlock = KBlock;
private:
//
// Data members
//
/// Stride data member. For GEMM, it equals to KBlock x stage.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCrosswise(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandCrosswise packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandCrosswise(extent[1]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
//
// First, compute c and s of vector within source (in units of vector
// accesses)
//
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
//
// Then swizzle
// The mapping is like this:
// id[1:0]|(id[3]^id[4])|id[2]
int vec_strided_within_tile = vec_contiguous_idx & 0x7;
int permuted_vec_contiguous =
(vec_strided_idx & (~0xF)) + (vec_strided_idx & 0x3) * 4 +
(((vec_strided_idx >> 2) ^ ((vec_strided_idx & 0x10) >> 3)) & 0x3);
permuted_vec_contiguous ^= ((vec_strided_within_tile >> 1) & 0x3);
int permuted_vec_strided = vec_contiguous_idx;
//
// Compute final element location
//
int element_contiguous = permuted_vec_contiguous * kElementsPerAccess +
(coord.contiguous() % kElementsPerAccess);
return element_contiguous + permuted_vec_strided * (stride_[0] * kElementsPerAccess);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[0] * stride_[0];
}
};
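//
// Usage sketch (illustrative only; the element size, KBlock, extent and
// coordinate below are hypothetical values, not ones required by the library).
// The layout maps a logical (contiguous, strided) coordinate to the swizzled
// element offset computed by operator() above:
//
//   using Layout = cutlass::layout::VoltaTensorOpMultiplicandCrosswise<16, 32>;
//   Layout layout = Layout::packed(cutlass::layout::PitchLinearCoord(64, 32));  // extent (contiguous, strided)
//   int64_t offset = layout(cutlass::layout::PitchLinearCoord(8, 3));           // swizzled element offset
//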
/// Template mapping a column-major view of pitch-linear memory to
/// VoltaTensorOpMultiplicandCrosswise
template <int ElementSize, int KBlock>
struct ColumnMajorVoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCrosswise<ElementSize, KBlock>;
/// This layout is optimized for 64b accesses
static int const kAccessSize = Base::kAccessSize;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandCrosswise packed(
TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandCrosswise(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
/// Template mapping a row-major view of pitch-linear memory to
/// VoltaTensorOpMultiplicandCrosswise
template <int ElementSize, int KBlock>
struct RowMajorVoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCrosswise<ElementSize, KBlock>;
/// This layout is optimized for 64b accesses
static int const kAccessSize = Base::kAccessSize;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandCrosswise packed(
TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandCrosswise(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
} // namespace layout
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 29,599 | C | 27.325359 | 106 | 0.699652 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/pitch_linear.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes for pitch-linear memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/pitch_linear_coord.h"
namespace cutlass {
namespace layout {
template <int Contiguous, int Strided>
using PitchLinearShape = cutlass::PitchLinearShape<Contiguous, Strided>;
using PitchLinearCoord = PitchLinearCoord;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for pitch-linear memory
class PitchLinear {
public:
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
PitchLinear(LongIndex ldm = 0): stride_(ldm) { }
/// Constructor
CUTLASS_HOST_DEVICE
PitchLinear(Stride _stride): stride_(_stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static PitchLinear packed(TensorCoord const &extent) {
return PitchLinear(extent.contiguous());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return LongIndex(coord.contiguous()) + LongIndex(coord.strided()) * LongIndex(stride_[0]);
}
/// Returns the logical coordinate given an offset.
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex index) const {
return make_Coord(
TensorCoord::Index(index % stride_[0]),
TensorCoord::Index(index / stride_[0])
);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout along the specified rank
CUTLASS_HOST_DEVICE
LongIndex stride(int rank) const {
return stride_[rank];
}
/// Returns the stride of the layout along the specified rank
CUTLASS_HOST_DEVICE
LongIndex & stride(int rank) {
return stride_[rank];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent.strided() * stride_[0];
}
};
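//
// Usage sketch (illustrative values). PitchLinear is the simplest layout: the
// offset of (contiguous, strided) is contiguous + strided * stride[0], and
// inverse() recovers the logical coordinate from an offset.
//
//   cutlass::layout::PitchLinear layout(128);                          // leading dimension of 128 elements
//   cutlass::layout::PitchLinearCoord coord(3, 2);                     // (contiguous, strided)
//   int64_t offset = layout(coord);                                    // 3 + 2 * 128 = 259
//   cutlass::layout::PitchLinearCoord back = layout.inverse(offset);   // (3, 2)
//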
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| 4,696 | C | 30.52349 | 100 | 0.663969 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Layouts needed by Ampere fp64 tensor core kernels.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace layout {
////////////////////////////////////////////////////////////////////////////////
/// Layout for 64b tensor op multiplicand operands (congruous variant), defined
/// in terms of pitch-linear memory.
struct TensorOpMultiplicandCongruous64b {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Static constants
//
static int const kElementSize = 64;
static int const kElementsPerAccess = 1;
private:
//
// Data members
//
/// Stride data member.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCongruous64b(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCongruous64b(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) {
return TensorOpMultiplicandCongruous64b(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
int tc = coord.contiguous() / 16;
int ts = coord.strided() / 4;
int c = coord.contiguous() % 16;
int s = coord.strided() % 4;
int bank = ((((c & 1) * 4 + (c & 6) / 2)) ^ (s & 1)) * 2 + (c / 8);
int row = (c & 6) / 2;
bank ^= ((s & 2) * 2);
LongIndex offset = tc * 16 + bank + (ts * 4 + row) * stride_[0];
return offset;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
return TensorCoord();
}
};
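//
// Usage sketch (illustrative extent and coordinate). operator() applies the
// XOR-based bank mapping above, so the returned offset is a swizzled version
// of the naive pitch-linear offset; note that inverse() is a stub returning a
// default TensorCoord for this layout.
//
//   using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b;
//   Layout layout = Layout::packed(cutlass::layout::PitchLinearCoord(16, 8));
//   int64_t offset = layout(cutlass::layout::PitchLinearCoord(5, 3));
//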
////////////////////////////////////////////////////////////////////////////////
/// Maps a column-major view of pitch-linear memory to
/// TensorOpMultiplicandCongruous64b
struct ColumnMajorTensorOpMultiplicandCongruous64b {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCongruous64b;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCongruous64b(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCongruous64b(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorTensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) {
return ColumnMajorTensorOpMultiplicandCongruous64b(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Maps a row-major view of pitch-linear memory to
/// TensorOpMultiplicandCongruous64b
struct RowMajorTensorOpMultiplicandCongruous64b {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCongruous64b;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCongruous64b(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCongruous64b(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorTensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) {
return RowMajorTensorOpMultiplicandCongruous64b(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Layout for 64b tensor op multiplicand operands (crosswise variant), defined
/// in terms of pitch-linear memory.
struct TensorOpMultiplicand64bCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Static constants
//
static int const kElementSize = 64;
static int const kElementsPerAccess = 1;
private:
//
// Data members
//
/// Stride data member.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicand64bCrosswise(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicand64bCrosswise(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) {
return TensorOpMultiplicand64bCrosswise(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
int tc = coord.contiguous() / 16;
int ts = coord.strided() / 16;
int c = coord.contiguous() % 16;
int s = coord.strided() % 16;
int k_group = c / 4;
int access_s = s / 2;
int row = access_s % 4;
int bank = ((k_group & 2) << 2) ^ ((s % 2) << 3) + (c % 4) * 2 + (access_s / 4) ^ (k_group & 1);
int smem_row = (k_group * 4 + row) + tc * 16;
int smem_col = ts * 16 + bank;
LongIndex offset = smem_row * stride_[0] + smem_col;
return offset;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
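//
// Usage sketch (illustrative values). The crosswise variant swizzles both the
// contiguous and strided indices (see operator() above); unlike the congruous
// layout it does not define an inverse(), and it is normally used through the
// ColumnMajor/RowMajor adaptors below.
//
//   using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise;
//   Layout layout = Layout::packed(cutlass::layout::PitchLinearCoord(32, 16));
//   int64_t offset = layout(cutlass::layout::PitchLinearCoord(7, 5));
//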
////////////////////////////////////////////////////////////////////////////////
/// Maps a column-major view of pitch-linear memory to
/// TensorOpMultiplicand64bCrosswise.
struct ColumnMajorTensorOpMultiplicand64bCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicand64bCrosswise;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicand64bCrosswise(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicand64bCrosswise(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorTensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) {
return ColumnMajorTensorOpMultiplicand64bCrosswise(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Maps a row-major view of pitch-linear memory to
/// TensorOpMultiplicand64bCrosswise.
struct RowMajorTensorOpMultiplicand64bCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicand64bCrosswise;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicand64bCrosswise(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicand64bCrosswise(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorTensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) {
return RowMajorTensorOpMultiplicand64bCrosswise(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Layout for 128b tensor op multiplicand operands (congruous variant), defined
/// in terms of pitch-linear memory.
struct TensorOpMultiplicandCongruous128b {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Static constants
//
static int const kElementSize = 128;
static int const kElementsPerAccess = 1;
private:
//
// Data members
//
/// Stride data member.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCongruous128b(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCongruous128b(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) {
return TensorOpMultiplicandCongruous128b(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
Index tc = coord.contiguous() / 8;
Index ts = coord.strided() / 4;
Index c = coord.contiguous() % 8;
Index s = coord.strided() % 4;
Index k_index = (c / 2);
Index bank = (((c & 1) * 4) | (s ^ k_index));
LongIndex offset = tc * 8 + bank + (ts * 4 + k_index) * stride_[0];
return offset;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
return TensorCoord();
}
};
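//
// Usage sketch (illustrative values). The interface matches the 64b congruous
// layout above: packed() takes a (contiguous, strided) extent and operator()
// returns the swizzled element offset; inverse() is likewise a stub.
//
//   using Layout = cutlass::layout::TensorOpMultiplicandCongruous128b;
//   Layout layout = Layout::packed(cutlass::layout::PitchLinearCoord(8, 4));
//   int64_t offset = layout(cutlass::layout::PitchLinearCoord(3, 2));
//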
////////////////////////////////////////////////////////////////////////////////
/// Maps a column-major view of pitch-linear memory to
/// TensorOpMultiplicandCongruous128b
struct ColumnMajorTensorOpMultiplicandCongruous128b {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCongruous128b;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCongruous128b(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCongruous128b(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorTensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) {
return ColumnMajorTensorOpMultiplicandCongruous128b(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Maps a row-major view of pitch-linear memory to
/// TensorOpMultiplicandCongruous128b
struct RowMajorTensorOpMultiplicandCongruous128b {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCongruous128b;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCongruous128b(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCongruous128b(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorTensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) {
return RowMajorTensorOpMultiplicandCongruous128b(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Layout for 128b tensor op multiplicand operands (crosswise 128x4 variant),
/// defined in terms of pitch-linear memory.
struct TensorOpMultiplicandCrosswise128x4 {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Static constants
//
static int const kElementSize = 128;
static int const kElementsPerAccess = 1;
private:
//
// Data members
//
/// Stride data member.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCrosswise128x4(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCrosswise128x4(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) {
return TensorOpMultiplicandCrosswise128x4(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
Index tc = coord.contiguous() / 8;
Index ts = coord.strided() / 8;
Index c = coord.contiguous() % 8;
Index s = coord.strided() % 8;
Index liq = c % 4;
Index bank = liq + ((s & 1) * 4) ^ (c & 4);
Index k_index = (c & 4) + (s / 4) * 2 + ((s & 2) / 2);
LongIndex offset = (tc * 8 + k_index) * stride_[0] + ts * 8 + bank;
return offset;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
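//
// Usage sketch (illustrative values); the interface mirrors the other
// pitch-linear tensor op layouts in this file.
//
//   using Layout = cutlass::layout::TensorOpMultiplicandCrosswise128x4;
//   Layout layout = Layout::packed(cutlass::layout::PitchLinearCoord(16, 8));
//   int64_t offset = layout(cutlass::layout::PitchLinearCoord(6, 5));
//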
////////////////////////////////////////////////////////////////////////////////
/// Maps a column-major view of pitch-linear memory to
/// TensorOpMultiplicandCrosswise128x4
struct ColumnMajorTensorOpMultiplicandCrosswise128x4 {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCrosswise128x4;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCrosswise128x4(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCrosswise128x4(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorTensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) {
return ColumnMajorTensorOpMultiplicandCrosswise128x4(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Maps a row-major view of pitch-linear memory to
/// TensorOpMultiplicandCrosswise128x4
struct RowMajorTensorOpMultiplicandCrosswise128x4 {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCrosswise128x4;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCrosswise128x4(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCrosswise128x4(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorTensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) {
return RowMajorTensorOpMultiplicandCrosswise128x4(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 29,336 | C | 24.734211 | 100 | 0.667201 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/permute.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines layout functions used by the GEMM+permute path for common tensor and matrix formats.
    Like layout functions, permute layout functions map logical coordinates to linear memory. They often require
    additional data to describe strides between elements.
    Permute layout functions must implement all members of the interface defined by NoPermute in this file. The
    address offset is computed in operator(), which derives the permuted row and column indices from the logical
    coordinate and combines them with the permuted stride (stride_permute_).
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include "assert.h"
#endif
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/coord.h"
#include "cutlass/tensor_coord.h"
namespace cutlass {
namespace layout {
class NoPermute {
public:
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
private:
//
// Data members
//
MatrixCoord extent_;
Index stride_unit_; // sizeof(AccessType) / kElementsPerAccess in epilogue's predicated_tile_iterator
Index stride_permute_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
NoPermute() { }
/// Constructor
CUTLASS_HOST_DEVICE
NoPermute(MatrixCoord extent, Index stride_init): extent_(extent) { }
/// Computes the address offset after Permute Op in Bytes
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord offset_init) { return 0; }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Defines permute layouts of various tensor formats.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permute layout function for 4-D permuted tensors: the output matrix (of shape [M, N]) is reshaped
/// as [M/D1, D1, D2, N/D2], and the permutation [0, 2, 1, 3] is then applied to the resulting 4-D tensor.
template <int D1, int D2>
class Tensor4DPermute0213 {
public:
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
private:
//
// Data members
//
MatrixCoord extent_;
Index stride_permute_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermute0213() { }
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermute0213(MatrixCoord extent, Index stride_init): extent_(extent) {
/// Update stride_permute with stride_init
stride_permute_ = stride_init / D2 * D1; // stride in Elements
}
/// Computes the address offset after Permute Op in Bytes
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord offset_init) {
    // Equivalent to torch.permute(X, [0, 2, 1, 3]): logical 4-D indices are [i, j, k, l] over a tensor X of
    // shape [D0, D1, D2, D3]; after the permutation the shape is [D0, D2, D1, D3].
assert(extent_.row() % D1 == 0);
assert(extent_.column() % D2 == 0);
int D3 = extent_.column() / D2;
Index col_init = offset_init.column();
Index row_init = offset_init.row();
int l = col_init % D3;
int k = col_init / D3;
int j = row_init % D1;
int i = row_init / D1;
// After the Permute Op
Index col_permute = l + j * D3;
Index row_permute = k + i * D2;
return LongIndex(row_permute) * LongIndex(stride_permute_) + LongIndex(col_permute);
}
/// Return D1
CUTLASS_HOST_DEVICE
Index d1() const {
return D1;
}
/// Return D2
CUTLASS_HOST_DEVICE
Index d2() const {
return D2;
}
};
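//
// Worked example (hypothetical sizes). With D1 = 2, D2 = 2, an output extent of
// [4, 8] and stride_init = 8: D3 = 8 / D2 = 4 and stride_permute_ = 8 / D2 * D1 = 8.
// The logical element at (row, column) = (1, 2) decomposes into
// [i, j, k, l] = [0, 1, 0, 2] and permutes to (row, column) = (0, 6):
//
//   using Permute = cutlass::layout::Tensor4DPermute0213<2, 2>;
//   Permute permute(cutlass::MatrixCoord(4, 8), 8);
//   int64_t offset = permute(cutlass::MatrixCoord(1, 2));   // 0 * 8 + 6 = 6
//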
/// Permute layout function for 4-D permuted tensors in BMM: the BMM output tensor (of shape [B, M, N]) is reshaped
/// as [B/D1, D1, M, N], and the permutation [0, 2, 1, 3] is then applied to the whole reshaped output tensor.
template <int D1>
class Tensor4DPermuteBMM0213 {
public:
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
private:
//
// Data members
//
MatrixCoord extent_;
Index stride_permute_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0213() { }
/// Constructor
CUTLASS_HOST_DEVICE
Tensor4DPermuteBMM0213(MatrixCoord extent, Index stride_init): extent_(extent) {
/// Update stride_permute with stride_init
stride_permute_ = stride_init * D1; // stride in Elements
}
/// Computes the address offset after Permute Op in Bytes
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord offset_init) {
// The batch index for BMM
Index BMM_batch_idx = blockIdx.z;
    // Equivalent to torch.permute(X, [0, 2, 1, 3]): logical 4-D indices are [i, j, k, l] over a tensor X of
    // shape [D0, D1, D2, D3]; after the permutation the shape is [D0, D2, D1, D3].
int D2 = extent_.row();
int D3 = extent_.column();
Index col_init = offset_init.column();
Index row_init = offset_init.row();
int l = col_init;
int k = row_init;
int j = BMM_batch_idx % D1;
int i = BMM_batch_idx / D1;
// After the Permute Op
Index col_permute = l + j * D3;
Index row_permute = k + i * D2;
return LongIndex(row_permute) * LongIndex(stride_permute_) + LongIndex(col_permute);
}
/// Return D1
CUTLASS_HOST_DEVICE
Index d1() const {
return D1;
}
};
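//
// Note: unlike Tensor4DPermute0213, operator() above obtains the BMM batch
// index from blockIdx.z, so despite the CUTLASS_HOST_DEVICE annotation the
// offset computation is only meaningful in device code where the batch is
// mapped to the z grid dimension.
//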
/// Permute layout function for 5-D permuted tensors: the output matrix (of shape [M, N]) is reshaped
/// as [M/T1, T1, T2, T3, N/T2/T3], and the permutation [2, 0, 3, 1, 4] is then applied to the resulting 5-D tensor.
template <int T1, int T2, int T3>
class Tensor5DPermute20314 {
public:
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
private:
//
// Data members
//
MatrixCoord extent_;
Index stride_permute_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute20314() { }
/// Constructor
CUTLASS_HOST_DEVICE
Tensor5DPermute20314(MatrixCoord extent, Index stride_init): extent_(extent) {
/// Update stride_permute with stride_init
stride_permute_ = stride_init / T2 * T1; // stride in Elements
}
/// Computes the address offset after Permute Op in Bytes
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord offset_init) {
    // Equivalent to torch.permute(X, [2, 0, 3, 1, 4]): logical 5-D indices are [i, j, k, l, m] over a tensor X of
    // shape [T0, T1, T2, T3, T4]; after the permutation the shape is [T2, T0, T3, T1, T4].
int T0 = extent_.row() / T1;
int T4 = extent_.column() / T2 / T3;
Index col_init = offset_init.column();
Index row_init = offset_init.row();
int m = col_init % T4;
int l = int(col_init / T4) % T3;
int k = int(col_init / T4) / T3;
int j = row_init % T1;
int i = row_init / T1;
// After the Permute Op
Index col_permute = m + j * T4 + l * T1 * T4;
Index row_permute = i + k * T0;
return LongIndex(row_permute) * LongIndex(stride_permute_) + LongIndex(col_permute);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| 9,133 | C | 27.996825 | 149 | 0.642286 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/vector.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used for rank=1 vectors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
namespace layout {
/// Tensor layout for densely packed vectors.
class PackedVectorLayout {
public:
/// Logical rank of tensor
static int const kRank = 1;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Coord<kRank, Index>;
/// Stride vector
using Stride = Coord<kStrideRank, Index>;
private:
//
// No actual stride vector stored
//
public:
//
// Methods
//
CUTLASS_HOST_DEVICE
PackedVectorLayout() { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static PackedVectorLayout packed(TensorCoord const &size) {
return PackedVectorLayout();
}
/// Returns the offset of a coordinate in linear memory
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord[0];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return make_Coord(1);
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &size) const {
return size[0];
}
};
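//
// Usage sketch (illustrative). For a packed rank-1 vector the layout is the
// identity mapping: the offset of coordinate {i} is simply i.
//
//   cutlass::layout::PackedVectorLayout layout;
//   int64_t offset = layout(cutlass::make_Coord(17));   // 17
//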
} // namespace layout
} // namespace cutlass
| 3,328 | C | 30.704762 | 100 | 0.692308 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm75.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Layouts for tensor op multiplicand operands in shared memory, used by Turing and Ampere tensor core kernels.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/layout/pitch_linear.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace layout {
////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear
/// memory and Crosswise size (in elements).
/// This is the base layout used by all Ampere/Turing fp16/bf16/int8/int4/int1
/// tensor core kernels; tf32 TN kernels use it as well.
template <int ElementSize, int Crosswise>
struct TensorOpMultiplicand {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Static constants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
static int const kCrosswise = Crosswise;
  /// Contiguous dimension of the tile shape matches one shared memory cache
  /// line - 128B. For a 128-bit access size, this equals 8 accesses.
static int const kTileShapeContiguous = 128 / (kAccessSize / 8);
/// Number of kblocks to store PartitionShape::kContiguous Elements
static int const kFactor =
kTileShapeContiguous * kElementsPerAccess / kCrosswise;
static_assert(
(kFactor > 0),
"kCrosswise should be no large than one shared memory cache line.");
/// The strided dimension needs to be at least (WarpSize(32) /
/// kTileShapeContiguous) for a warp to access. To ensure conflict free
/// access, it also needs to be at least (kTileShapeContiguous / kFactor).
/// See comments below
static int const kTileShapeStride =
((kTileShapeContiguous / kFactor) > (32 / kTileShapeContiguous))
? (kTileShapeContiguous / kFactor)
: (32 / kTileShapeContiguous);
/// Fundamental tile shape in units of vectors to guarantee bank conflict free
/// shared memory load/store.
/// For kFactor = 1, TileShape = <8, 8>
/// For kFactor > 1, TileShape = <8, 4>
using TileShape = PitchLinearShape<kTileShapeContiguous, kTileShapeStride>;
/// Fundamental partition shape in units of vectors
using PartitionShape = PitchLinearShape<4, 4>;
using PartitionCount =
PitchLinearShape<TileShape::kContiguous / PartitionShape::kContiguous,
TileShape::kStrided / PartitionShape::kStrided>;
using AccessCount =
PitchLinearShape<PartitionShape::kContiguous, PartitionShape::kStrided>;
private:
//
// Data members
//
  /// Stride data member. For GEMM, it equals kCrosswise x the number of stages.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicand(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicand(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicand packed(TensorCoord const &extent) {
return TensorOpMultiplicand(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
//
// First, compute c and s of vector within source (in units of vector
// accesses)
//
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided() / kFactor;
// Compute the fundamental tile being accessed
int tile_contiguous_idx =
vec_contiguous_idx / (TileShape::kContiguous / kFactor);
int tile_contiguous_residual =
vec_contiguous_idx % (TileShape::kContiguous / kFactor) +
((coord.strided() % kFactor) * (TileShape::kContiguous / kFactor));
int tile_strided_residual = vec_strided_idx % TileShape::kStrided;
// Compute the 'partition' within the fundamental tile
int partition_contiguous_idx =
tile_contiguous_residual / PartitionShape::kContiguous;
int partition_strided_idx =
tile_strided_residual / PartitionShape::kStrided;
int partition_contiguous_residual =
tile_contiguous_residual % PartitionShape::kContiguous;
int partition_strided_residual =
tile_strided_residual % PartitionShape::kStrided;
//
// Then swizzle
//
int permuted_vec_contiguous_within_partition =
partition_contiguous_residual ^ (partition_strided_residual % 4);
int permuted_partition_contiguous_within_tile =
partition_contiguous_idx ^ (partition_strided_idx % 2);
//
// Compute final element location
//
int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous +
permuted_partition_contiguous_within_tile *
PartitionShape::kContiguous +
permuted_vec_contiguous_within_partition) *
kElementsPerAccess +
(coord.contiguous() % kElementsPerAccess);
int element_strided = vec_strided_idx;
return element_contiguous + element_strided * stride_[0] * kFactor;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
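// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// CUTLASS header). It shows how the swizzled TensorOpMultiplicand layout above
// might be instantiated for 16-bit elements with a 64-element crosswise
// dimension. The extent and coordinate values are arbitrary assumptions chosen
// only to exercise packed() and operator(), and the helper name is hypothetical.
inline void tensor_op_multiplicand_usage_sketch() {
  // 16-bit elements: kElementsPerAccess = 8, kFactor = 1, TileShape = <8, 8>.
  using Layout = TensorOpMultiplicand<16, 64>;
  // Tightly packed layout for a (contiguous, strided) extent of (64, 16),
  // giving stride_[0] == 64.
  Layout layout = Layout::packed(PitchLinearCoord(64, 16));
  // Logical coordinate (contiguous = 9, strided = 3) is swizzled before being
  // linearized; with the parameters above this works out to offset 209.
  Layout::LongIndex offset = layout(PitchLinearCoord(9, 3));
  (void)offset;
}
// ---------------------------------------------------------------------------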
////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear
/// memory and Crosswise size (in elements).
template <int ElementSize, int Crosswise>
struct TensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicand<ElementSize, Crosswise>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCongruous(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCongruous(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return TensorOpMultiplicandCongruous(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(coord);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return coord;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(extent);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear
/// memory and Crosswise size (in elements).
/// This partial specialization is used only by the TF32 NT kernels.
template <int Crosswise>
struct TensorOpMultiplicandCongruous<32, Crosswise> {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
/// Fundamental tile shape in units of vectors
using TileShape = PitchLinearShape<8, 4>;
  /// PartitionShape is the same as TileShape for this layout
using PartitionShape = PitchLinearShape<8, 4>;
using PartitionCount =
PitchLinearShape<TileShape::kContiguous / PartitionShape::kContiguous,
TileShape::kStrided / PartitionShape::kStrided>;
using AccessCount =
PitchLinearShape<PartitionShape::kContiguous, PartitionShape::kStrided>;
//
// Static constants
//
static int const kElementSize = 32;
static int const kElementsPerAccess = kAccessSize / kElementSize;
private:
//
// Data members
//
/// Stride data member.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCongruous(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCongruous(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return TensorOpMultiplicandCongruous(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
int tc = coord.contiguous() / 32;
int ts = coord.strided() / 4;
int c = (coord.contiguous() % 32) / kElementsPerAccess;
int s = coord.strided() % 4;
LongIndex offset = (c ^ (2 * s)) * kElementsPerAccess + s * stride_[0] +
tc * 32 + ts * stride_[0] * 4 + coord.contiguous() % 4;
return offset;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
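// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// CUTLASS header). The TF32 specialization above packs 4 x 32-bit elements per
// 128-bit access and XOR-swizzles the access index with the strided index.
// The template arguments, extent, and coordinate below are arbitrary
// assumptions, and the helper name is hypothetical.
inline void tf32_congruous_usage_sketch() {
  using Layout = TensorOpMultiplicandCongruous<32, 32>;
  // Packed layout for a (contiguous, strided) extent of (32, 8): stride_[0] == 32.
  Layout layout = Layout::packed(PitchLinearCoord(32, 8));
  // coord (5, 3): tc = 0, ts = 0, c = 1, s = 3
  //   -> offset = (1 ^ 6) * 4 + 3 * 32 + (5 % 4) = 125.
  Layout::LongIndex offset = layout(PitchLinearCoord(5, 3));
  (void)offset;
}
// ---------------------------------------------------------------------------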
////////////////////////////////////////////////////////////////////////////////
/// Template mapping a column-major view of pitch-linear memory to
/// TensorOpMultiplicand
template <int ElementSize, int Crosswise>
struct ColumnMajorTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCongruous<ElementSize, Crosswise>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return ColumnMajorTensorOpMultiplicandCongruous(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Template mapping a row-major view of pitch-linear memory to
/// TensorOpMultiplicand
template <int ElementSize, int Crosswise>
struct RowMajorTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCongruous<ElementSize, Crosswise>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return RowMajorTensorOpMultiplicandCongruous(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear
/// memory and Crosswise size (in elements).
template <int ElementSize, int Crosswise>
struct TensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicand<ElementSize, Crosswise>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
static int const kCrosswise = Base::kCrosswise;
static int const kFactor = Base::kFactor;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicandCrosswise packed(TensorCoord const &extent) {
return TensorOpMultiplicandCrosswise(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(coord);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return coord;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(extent);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Template mapping a column-major view of pitch-linear memory to
/// TensorOpMultiplicandCrosswise
template <int ElementSize, int Crosswise>
struct ColumnMajorTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCrosswise<ElementSize, Crosswise>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorTensorOpMultiplicandCrosswise packed(
TensorCoord const &extent) {
return ColumnMajorTensorOpMultiplicandCrosswise(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
////////////////////////////////////////////////////////////////////////////////
/// Template mapping a row-major view of pitch-linear memory to
/// TensorOpMultiplicandCrosswise
template <int ElementSize, int Crosswise>
struct RowMajorTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = TensorOpMultiplicandCrosswise<ElementSize, Crosswise>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorTensorOpMultiplicandCrosswise packed(
TensorCoord const &extent) {
return RowMajorTensorOpMultiplicandCrosswise(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
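// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// CUTLASS header). The row-major / column-major adapters above only reorder a
// MatrixCoord into the (contiguous, strided) convention of the underlying
// pitch-linear crosswise layout. The template arguments, extents, and
// coordinates below are arbitrary assumptions; the helper name is hypothetical.
inline void crosswise_adapter_usage_sketch() {
  using RowMajorCrosswise = RowMajorTensorOpMultiplicandCrosswise<16, 32>;
  using ColumnMajorCrosswise = ColumnMajorTensorOpMultiplicandCrosswise<16, 32>;
  RowMajorCrosswise row_major = RowMajorCrosswise::packed(MatrixCoord(8, 32));
  ColumnMajorCrosswise col_major = ColumnMajorCrosswise::packed(MatrixCoord(32, 8));
  // Row-major forwards (row, column) as (contiguous = column, strided = row);
  // column-major forwards it as (contiguous = row, strided = column). With the
  // transposed extents used here, both calls reach the same underlying offset.
  RowMajorCrosswise::LongIndex a = row_major(MatrixCoord(3, 5));
  ColumnMajorCrosswise::LongIndex b = col_major(MatrixCoord(5, 3));
  (void)a;
  (void)b;
}
// ---------------------------------------------------------------------------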
////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear memory.
template <int ElementSize, int InterleavedK>
struct TensorOpMultiplicandColumnMajorInterleaved {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
//static int const kThreadBlockStrided = ThreadBlockStrided;
static int const kInterleavedK = InterleavedK;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandColumnMajorInterleaved(Index ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandColumnMajorInterleaved(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicandColumnMajorInterleaved packed(TensorCoord const &extent) {
return TensorOpMultiplicandColumnMajorInterleaved(extent[0] * kInterleavedK);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
int const rows_per_smem_cache_line = 128 / kInterleavedK;
int row_id = coord.strided() / rows_per_smem_cache_line;
int col_id = (coord.strided() % rows_per_smem_cache_line) * kInterleavedK + coord.contiguous();
int access_block_id = col_id >> 4;
int swizzle_access_block_id = access_block_id ^ (row_id & 1);
int swizzle_col_id = swizzle_access_block_id << 4;
return row_id * 128 + swizzle_col_id;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return (extent[1] / kInterleavedK) * stride_[0];
}
};
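// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// CUTLASS header). The interleaved multiplicand layout above groups the
// contiguous coordinate into 16-element access blocks and XOR-swizzles the
// block index with the low bit of the 128-element row index. The template
// arguments, extent, and coordinate below are arbitrary assumptions; the
// helper name is hypothetical.
inline void interleaved_multiplicand_usage_sketch() {
  // 8-bit elements interleaved along K in groups of kInterleavedK = 32.
  using Layout = TensorOpMultiplicandColumnMajorInterleaved<8, 32>;
  Layout layout = Layout::packed(PitchLinearCoord(4, 8));  // stride_[0] == 4 * 32
  Layout::LongIndex offset = layout(PitchLinearCoord(3, 5));
  (void)offset;
}
// ---------------------------------------------------------------------------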
////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear memory.
template <int ElementSize, int InterleavedK>
struct TensorOpMultiplicandRowMajorInterleaved {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
//static int const kThreadBlockStrided = ThreadBlockStrided;
static int const kInterleavedK = InterleavedK;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandRowMajorInterleaved(Index ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
TensorOpMultiplicandRowMajorInterleaved(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorOpMultiplicandRowMajorInterleaved packed(TensorCoord const &extent) {
return TensorOpMultiplicandRowMajorInterleaved(extent[1] * kInterleavedK);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
int const rows_per_smem_cache_line = 128 / kInterleavedK;
int row_id = coord.strided() / rows_per_smem_cache_line;
int col_id = (coord.strided() % rows_per_smem_cache_line) * kInterleavedK + coord.contiguous();
int access_block_id = col_id >> 4;
int swizzle_access_block_id = access_block_id ^ (row_id & 1);
int swizzle_col_id = swizzle_access_block_id << 4;
return row_id * 128 + swizzle_col_id;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return (extent[0] / kInterleavedK) * stride_[0];
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 33,137 | C | 27.518072 | 100 | 0.683194 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/layout.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes.
Layout functions map logical coordinates to linear memory. They often require additional
data to describe strides between elements.
Layout functions must implement all members in the public interface of IdentityTensorLayout<>
defined in cutlass/tensor_ref.h.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace layout {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| 3,020 | C | 45.476922 | 100 | 0.618212 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/tensor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes for common 4-D and 5-D
tensor formats.
Layout functions map logical coordinates to linear memory. They often require additional
data to describe strides between elements.
Layout functions must implement all members in the public interface of IdentityTensorLayout<>
defined in cutlass/tensor_ref.h.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include "assert.h"
#endif
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/coord.h"
#include "cutlass/tensor_coord.h"
namespace cutlass {
namespace layout {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Defines data layouts of various tensor formats usable by TensorRef and other classes.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NHWC tensors.
class TensorNHWC {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, h, w, c)
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [stride_w, stride_h, stride_n]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNHWC(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNHWC(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNHWC(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed NHWC tensor.
CUTLASS_HOST_DEVICE
static TensorNHWC packed(TensorCoord const &extent) {
return TensorNHWC(
make_Coord(
extent.c(),
extent.w() * extent.c(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate (n, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.c() +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[2]);
}
/// Returns the logical coordinate (n, h, w, c) from a given offset in linear memory.
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex index) const {
int n = 0, h = 0, w = 0, c = 0;
#if defined(__CUDA_ARCH__)
int tmp = 0;
c = int(index % static_cast<int>(stride_[0]));
unsigned int hw_mul, hw_shr, w_mul, w_shr, c_mul, c_shr;
find_divisor(hw_mul, hw_shr, stride_[2]);
find_divisor(w_mul, w_shr, stride_[1]);
find_divisor(c_mul, c_shr, stride_[0]);
fast_divmod(n, tmp, index, int(stride_[2]), hw_mul, hw_shr);
fast_divmod(h, w, tmp, int(stride_[1]), w_mul, w_shr);
fast_divmod(w, tmp, w, int(stride_[0]), c_mul, c_shr);
#else
n = int(index / stride_[2]);
LongIndex residual = index % stride_[2];
h = int(residual / stride_[1]);
residual = (residual % stride_[1]);
w = int(residual / stride_[0]);
c = int(residual % stride_[0]);
#endif
return TensorCoord(n, h, w, c);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
    // The capacity calculation is not meaningful if the extent exceeds the
    // corresponding strides, so it cannot be relied upon in such cases.
    // These checks could be moved to debug-only code.
if ((extent.c() > stride_[0])
|| (extent.w() * stride_[0] > stride_[1])
|| (extent.h() * stride_[1] > stride_[2])) {
assert(0);
}
return extent.n() * stride_[2];
}
};
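// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// CUTLASS header). For a packed NHWC tensor the strides are [C, W*C, H*W*C],
// so the offset of (n, h, w, c) is c + w*C + h*W*C + n*H*W*C. The extent and
// coordinate below are arbitrary assumptions; the helper name is hypothetical.
inline void tensor_nhwc_usage_sketch() {
  // N = 2, H = 4, W = 8, C = 16 -> packed strides (16, 128, 512).
  TensorNHWC layout = TensorNHWC::packed(Tensor4DCoord(2, 4, 8, 16));
  // (n = 1, h = 2, w = 3, c = 5) -> 5 + 3*16 + 2*128 + 1*512 = 821.
  TensorNHWC::LongIndex offset = layout(Tensor4DCoord(1, 2, 3, 5));
  // inverse() recovers the logical (n, h, w, c) coordinate from the offset.
  Tensor4DCoord coord = layout.inverse(offset);
  (void)coord;
}
// ---------------------------------------------------------------------------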
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NCHW tensors.
class TensorNCHW {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [w, hw, chw]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCHW(Stride const &stride = Stride(0)): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorNCHW packed(TensorCoord const &extent) {
return TensorNCHW(
make_Coord(
extent.w(),
extent.w() * extent.h(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.w() +
LongIndex(stride_[0] * coord.h()) +
LongIndex(stride_[1] * coord.c()) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent.n() * stride_[2];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NC/xHWx tensors.
template <int Interleave>
class TensorNCxHWx {
public:
/// Interleaving quantity
static int const kInterleave = Interleave;
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [Interleave x w, Interleave x wh, hwc]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCxHWx(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCxHWx(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNCxHWx(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorNCxHWx packed(TensorCoord const &extent) {
return TensorNCxHWx(
make_Coord(
kInterleave * extent.w(),
kInterleave * extent.w() * extent.h(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
Index c_minor = (coord.c() % kInterleave);
Index c_major = (coord.c() / kInterleave);
return c_minor +
LongIndex(kInterleave * coord.w()) +
LongIndex(stride_[0] * coord.h()) +
LongIndex(stride_[1] * c_major) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent.n() * stride_[2];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D CxRSKx tensors.
template <int Interleave>
class TensorCxRSKx {
public:
/// Interleaving quantity
static int const kInterleave = Interleave;
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [Interleave x n, Interleave x nw, Interleave x nwh]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorCxRSKx(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorCxRSKx(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorCxRSKx(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorCxRSKx packed(TensorCoord const &extent) {
return TensorCxRSKx(
make_Coord(
kInterleave * extent.n(),
kInterleave * extent.n() * extent.w(),
kInterleave * extent.n() * extent.w() * extent.h()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
Index c_minor = (coord.c() % kInterleave);
Index c_major = (coord.c() / kInterleave);
return c_minor +
LongIndex(kInterleave * coord.n()) +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * c_major);
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord const &coord) const {
return (coord.contiguous() % kInterleave) +
LongIndex((coord.contiguous() / kInterleave) * stride_[2]) +
LongIndex(coord.strided() * kInterleave);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return (extent.c() / kInterleave * stride_[2]);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 5-D NDHWC tensors.
class TensorNDHWC {
public:
/// Logical rank of tensor
static int const kRank = 5;
/// Rank of stride vector
static int const kStrideRank = 4;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, d, h, w, c)
using TensorCoord = Tensor5DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [c, wc, hwc, dhwc]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNDHWC(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNDHWC(
typename Stride::Index c,
typename Stride::Index wc,
typename Stride::Index hwc,
typename Stride::Index dhwc):
stride_(make_Coord(c, wc, hwc, dhwc)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNDHWC(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]),
static_cast<typename Stride::Index>(stride[3]))
) { }
/// Helper returns a layout to a tightly packed NHWC tensor.
CUTLASS_HOST_DEVICE
static TensorNDHWC packed(TensorCoord const &extent) {
return TensorNDHWC(
make_Coord(
extent.c(),
extent.w() * extent.c(),
extent.h() * extent.w() * extent.c(),
extent.d() * extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate (n, d, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.c() +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * coord.d()) +
LongIndex(stride_[3] * coord.n());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[3]);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
    // The capacity calculation is not meaningful if the extent exceeds the
    // corresponding strides, so it cannot be relied upon in such cases.
    // These checks could be moved to debug-only code.
if ((extent.c() > stride_[0])
|| (extent.w() * stride_[0] > stride_[1])
|| (extent.h() * stride_[1] > stride_[2])
|| (extent.d() * stride_[2] > stride_[3])) {
assert(0);
}
return extent.n() * stride_[3];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| 18,295 | C | 27.722135 | 100 | 0.630227 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/layout/matrix.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes.
Layout functions map logical coordinates to linear memory. They often require additional
data to describe strides between elements.
Layout functions must implement all members in the public interface of IdentityTensorLayout<>
defined in cutlass/tensor_ref.h.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/pitch_linear_coord.h"
namespace cutlass {
namespace layout {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Defines data layouts of various matrix formats usable by TensorRef and other classes.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for row-major matrices.
class RowMajor {
public:
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
RowMajor(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajor(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajor packed(MatrixCoord const &extent) {
return RowMajor(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return LongIndex(coord.row()) * LongIndex(stride_[0]) + coord.column();
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(Index(offset / stride_[0]), Index(offset % stride_[0]));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return LongIndex(extent.row()) * LongIndex(stride_[0]);
}
};
/// Mapping function for column-major matrices.
class ColumnMajor {
public:
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajor(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajor(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajor packed(MatrixCoord const &extent) {
return ColumnMajor(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return LongIndex(coord.column()) * LongIndex(stride_[0]) + coord.row();
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
return MatrixCoord(Index(offset % stride_[0]), Index(offset / stride_[0]));
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return LongIndex(extent.column()) * LongIndex(stride_[0]);
}
};
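// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// CUTLASS header). It contrasts the two canonical layouts above on the same
// logical coordinate. The extent is an arbitrary assumption and the helper
// name is hypothetical.
inline void canonical_matrix_layout_usage_sketch() {
  MatrixCoord extent(4, 8);                             // 4 rows x 8 columns
  RowMajor row_major = RowMajor::packed(extent);        // leading dimension 8
  ColumnMajor col_major = ColumnMajor::packed(extent);  // leading dimension 4
  // (row = 2, column = 3): row-major -> 2*8 + 3 = 19, column-major -> 3*4 + 2 = 14.
  RowMajor::LongIndex a = row_major(MatrixCoord(2, 3));
  ColumnMajor::LongIndex b = col_major(MatrixCoord(2, 3));
  (void)a;
  (void)b;
}
// ---------------------------------------------------------------------------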
/// Mapping function for interleaved matrices. Matrix is structured
/// as row-major arrangement of fixed-size columns.
template <int Interleave>
struct RowMajorInterleaved {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of interleaved columns
static int const kInterleave = Interleave;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorInterleaved(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorInterleaved(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorInterleaved packed(MatrixCoord const &extent) {
return RowMajorInterleaved(extent.column() * kInterleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index row_major = coord.row() / kInterleave;
Index row_minor = coord.row() % kInterleave;
return LongIndex(row_major) * LongIndex(stride_[0]) + LongIndex(coord.column()) * kInterleave + row_minor;
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
Index row_major = Index(offset / stride_[0]);
Index residual = Index(offset % stride_[0]);
Index column = residual / kInterleave;
Index row_minor = residual % kInterleave;
return MatrixCoord(row_major * kInterleave + row_minor, column);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.row() + kInterleave - 1) / kInterleave * stride_[0];
}
};
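// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// CUTLASS header). With kInterleave = 4, groups of 4 consecutive rows are
// stored contiguously for each column. The extent and coordinate below are
// arbitrary assumptions; the helper name is hypothetical.
inline void row_major_interleaved_usage_sketch() {
  using Layout = RowMajorInterleaved<4>;
  // 8 rows x 8 columns -> stride_[0] = 8 * 4 = 32 elements per interleaved row group.
  Layout layout = Layout::packed(MatrixCoord(8, 8));
  // (row = 5, column = 3): row_major = 1, row_minor = 1 -> 1*32 + 3*4 + 1 = 45.
  Layout::LongIndex offset = layout(MatrixCoord(5, 3));
  // inverse() maps the linear offset back to (5, 3).
  MatrixCoord coord = layout.inverse(offset);
  (void)coord;
}
// ---------------------------------------------------------------------------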
/// Mapping function for interleaved matrices. Matrix is structured
/// as column-major arrangement of fixed-size rows.
template <int Interleave>
struct ColumnMajorInterleaved {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of interleaved columns
static int const kInterleave = Interleave;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorInterleaved(LongIndex ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorInterleaved(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorInterleaved packed(MatrixCoord const &extent) {
return ColumnMajorInterleaved(extent.row() * kInterleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index column_major = coord.column() / kInterleave;
Index column_minor = coord.column() % kInterleave;
return LongIndex(column_major) * LongIndex(stride_[0]) + LongIndex(coord.row()) * kInterleave + column_minor;
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
Index column_major = Index(offset / stride_[0]);
Index residual = Index(offset % stride_[0]);
Index row = residual / kInterleave;
Index column_minor = residual % kInterleave;
return MatrixCoord(row, column_major * kInterleave + column_minor);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.column() + kInterleave - 1) / kInterleave * stride_[0];
}
};
/// Enumerated type for canonical pitch-linear matrix layouts
enum class Matrix {
kColumnMajor, ///< leading dimension refers to stride between columns; stride along rows is 1
kRowMajor ///< leading dimension refers to stride between rows; stride along columns is 1
};
/// Mapping function for scenario in which layout is row-major or column-major but this information
/// is only available at runtime.
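///
/// Illustrative note: ContiguousMatrix(ldm, Matrix::kRowMajor) computes the same offsets as
/// RowMajor(ldm), and ContiguousMatrix(ldm, Matrix::kColumnMajor) the same offsets as
/// ColumnMajor(ldm); only the dispatch on the layout enumerant happens at runtime.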
struct ContiguousMatrix {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
/// Enumerated type indicating canonical matrix layout
Matrix layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ContiguousMatrix(
Index ldm = 0,
Matrix layout = Matrix::kColumnMajor
):
stride_(ldm), layout_(layout) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ContiguousMatrix packed(
MatrixCoord const &extent,
Matrix layout = Matrix::kColumnMajor) {
Index ldm = 0;
if (layout == Matrix::kColumnMajor) {
ldm = extent.row();
}
else if (layout == Matrix::kRowMajor) {
ldm = extent.column();
}
return ContiguousMatrix(ldm, layout);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
if (layout_ == Matrix::kColumnMajor) {
return coord.row() + coord.column() * stride_[0];
}
else if (layout_ == Matrix::kRowMajor) {
return coord.row() * stride_[0] + coord.column();
}
else {
// degenerate case
return 0;
}
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
// TODO
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
if (layout_ == Matrix::kColumnMajor) {
return stride_[0] * extent.column();
}
else if (layout_ == Matrix::kRowMajor) {
return stride_[0] * extent.row();
}
else {
// degenerate case
return 0;
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for scenario in which both rows and columns are separated by a stride.
template <int Rank>
struct AffineRankN {
/// Logical rank of tensor
static int const kRank = Rank;
/// Rank of stride vector
static int const kStrideRank = kRank;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Coord<kRank, Index>;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRankN(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRankN(
Coord<kRank/2, LongIndex> const &stride_m,
Coord<kRank/2, LongIndex> const &stride_n
) {
// Concatenate the strides
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kRank/2; ++m) {
stride_[m] = stride_m[m];
}
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kRank/2; ++n) {
stride_[n + kRank/2] = stride_n[n];
}
}
/// Ctor for N = 2
CUTLASS_HOST_DEVICE
AffineRankN(
LongIndex const &stride_m,
LongIndex const &stride_n
) {
stride_[0] = stride_m;
stride_[1] = stride_n;
}
/// Ctor for N = 2
CUTLASS_HOST_DEVICE
AffineRankN(
LongIndex const &stride
) {
stride_[0] = stride;
stride_[1] = 1;
}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRankN packed(TensorCoord const &extent) {
AffineRankN layout;
layout.stride_[kRank - 1] = 1;
CUTLASS_PRAGMA_UNROLL
for (int i = kRank - 1; i > 0; --i) {
layout.stride_[i - 1] = layout.stride_[i] * extent[i];
}
return layout;
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
// TODO
return TensorCoord();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
int idx = stride_.max_dim_index();
return extent[idx] * stride_[idx];
}
};
/// Mapping function for scenario in which both rows and columns are separated by a stride.
/// Row stride is smaller than column stride in AffineRank2ColumnMajor.
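///
/// Illustrative example (M is an arbitrary leading dimension used only for the example):
/// with strides (row_stride = 1, column_stride = M), operator() computes
/// offset(r, c) = r + c * M, i.e. the same mapping as a packed column-major matrix.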
struct AffineRank2ColumnMajor {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
LongIndex row_stride, ///< stride between elements in consecutive rows
LongIndex column_stride ///< stride between elements in consecutive columns
)
{ stride_[0] = row_stride; stride_[1] = column_stride;}
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2ColumnMajor(
LongIndex stride
)
{ stride_[0] = 1; stride_[1] = stride;}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRank2ColumnMajor packed(MatrixCoord const &extent) {
    return AffineRank2ColumnMajor(1, extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
// TODO
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return extent.column() * stride_[1];
}
};
/// Mapping function for scenario in which both rows and columns are separated by a stride.
/// Column stride is smaller than row stride in AffineRank2RowMajor.
struct AffineRank2RowMajor {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
Stride const &stride = Stride()
):
stride_(stride) { }
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
LongIndex row_stride, ///< stride between elements in consecutive rows
LongIndex column_stride ///< stride between elements in consecutive columns
) { stride_[0] = row_stride; stride_[1] = column_stride;}
/// Ctor
CUTLASS_HOST_DEVICE
AffineRank2RowMajor(
LongIndex stride
) { stride_[0] = stride; stride_[1] = 1;}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static AffineRank2RowMajor packed(MatrixCoord const &extent) {
return AffineRank2RowMajor(extent.column(), 1);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return dot(coord, stride_);
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
// TODO
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return extent.row() * stride_[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Utility functions to convert stride_factor to the strides used by the Affine2 layout.
//
// stride_factor is the logical distance between two coordinates.
//
// All coordinates used here are matrix coordinates. stride[0] and extent[0] are for the
// rows. stride[1] and extent[1] are for the columns.
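//
// Illustrative example (the values below are arbitrary): for AffineRank2ColumnMajor with
// stride_factor = (1, 1) and extent = (M, N), the factory produces strides
// (1, 1 * 1 * M) = (1, M), i.e. a tightly packed column-major layout; larger stride factors
// space out the rows and columns proportionally.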
template <typename Affine2Layout>
struct Affine2Layout_Factory {
CUTLASS_HOST_DEVICE
static Affine2Layout layout_factory(cutlass::Coord<2> const &extent, typename Affine2Layout::Stride stride_factor) {
return Affine2Layout::packed(extent);
}
};
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRank2ColumnMajor> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRank2ColumnMajor layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRank2ColumnMajor::Stride stride_factor) {
return cutlass::layout::AffineRank2ColumnMajor({ stride_factor[0], stride_factor[0] * stride_factor[1] * extent[0] });
}
};
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRank2RowMajor> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRank2RowMajor layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRank2RowMajor::Stride stride_factor) {
return cutlass::layout::AffineRank2RowMajor({ stride_factor[0] * stride_factor[1] * extent[1], stride_factor[1] });
}
};
// The base layout cutlass::layout::AffineRankN<2> is similar to AffineRank2ColumnMajor
template <>
struct Affine2Layout_Factory<cutlass::layout::AffineRankN<2>> {
CUTLASS_HOST_DEVICE
static cutlass::layout::AffineRankN<2> layout_factory(
cutlass::Coord<2> const &extent,
typename cutlass::layout::AffineRankN<2>::Stride stride_factor) {
return cutlass::layout::AffineRankN<2>({ stride_factor[0], stride_factor[0] * stride_factor[1] * extent[0] });
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for block-linear matrices. Matrix is structured
/// as column-major arrangement of 2D tiles (that are column-major).
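///
/// Illustrative example (the values BlockRows = BlockColumns = 4, stride = 64, and
/// coordinate (5, 6) are arbitrary): offset = (5 % 4) + (6 % 4) * 4 + (5 / 4) * 16 + (6 / 4) * 64
/// = 1 + 8 + 16 + 64 = 89, following the formula in operator() below.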
template <int BlockRows, int BlockColumns>
struct ColumnMajorBlockLinear {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of a block in rows
static int const kBlockRows = BlockRows;
/// Size of a block in columns
static int const kBlockColumns = BlockColumns;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorBlockLinear(Index ldm = 0): stride_(ldm) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorBlockLinear packed(MatrixCoord const &extent) {
return ColumnMajorBlockLinear(extent.row() * kBlockRows * kBlockColumns);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return
(coord.row() % kBlockRows) +
(coord.column() % kBlockColumns) * kBlockRows +
(coord.row() / kBlockRows) * kBlockRows * kBlockColumns +
(coord.column() / kBlockColumns) * stride_[0];
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
// TODO
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.column() + kBlockColumns - 1) / kBlockColumns * stride_[0];
}
};
/// Mapping function for block-linear matrices. Matrix is structured
/// as row-major arrangement of 2D tiles (that are row-major)
template <int BlockRows, int BlockColumns>
struct RowMajorBlockLinear {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, LongIndex>;
/// Size of a block in rows
static int const kBlockRows = BlockRows;
/// Size of a block in columns
static int const kBlockColumns = BlockColumns;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorBlockLinear(Index ldm = 0): stride_(ldm) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorBlockLinear packed(MatrixCoord const &extent) {
return RowMajorBlockLinear(extent.column() * kBlockRows * kBlockColumns);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
return
(coord.column() % kBlockColumns) +
(coord.row() % kBlockRows) * kBlockColumns +
(coord.column() / kBlockColumns) * kBlockRows * kBlockColumns +
(coord.row() / kBlockRows) * stride_[0];
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
MatrixCoord inverse(LongIndex offset) const {
// TODO
return MatrixCoord(0, 0);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
return (extent.row() + kBlockRows - 1) / kBlockRows * stride_[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct GeneralMatrix {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 2;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index>;
private:
//
// Data members
//
Matrix layout_id_;
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
GeneralMatrix(): layout_id_(Matrix::kColumnMajor), stride_(make_Coord(0, 1)) { }
/// Ctor
CUTLASS_HOST_DEVICE
GeneralMatrix(
Matrix layout_id,
Index ldm,
Index interleave): layout_id_(layout_id), stride_(make_Coord(ldm, interleave)) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static GeneralMatrix packed(
MatrixCoord const &extent,
Matrix layout_id = Matrix::kColumnMajor,
Index interleave = 1) {
Index c;
if (layout_id == Matrix::kRowMajor) {
c = extent.column();
}
else {
c = extent.row();
}
Index ldm = c * interleave;
return GeneralMatrix(layout_id, ldm, interleave);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (row, column)
CUTLASS_HOST_DEVICE
LongIndex operator()(MatrixCoord const &coord) const {
Index c, s;
if (layout_id_ == Matrix::kRowMajor) {
c = coord.column();
s = coord.row();
}
else {
s = coord.column();
c = coord.row();
}
Index v = s / stride_[1];
Index residual = (s % stride_[1]);
return LongIndex(c) * LongIndex(stride_[1]) + LongIndex(v) * LongIndex(stride_[0]) + residual;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
CUTLASS_HOST_DEVICE
Matrix layout_id() const {
return layout_id_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
CUTLASS_HOST_DEVICE
Matrix & layout_id() {
return layout_id_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index stride(int idx) const {
return stride_[idx];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
typename Stride::Index & stride(int idx) {
return stride_[idx];
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(MatrixCoord const &extent) const {
Index s;
if (layout_id_ == Matrix::kRowMajor) {
s = extent.row();
}
else {
s = extent.column();
}
Index v = Index((s + stride_[1] - 1) / stride_[1]);
return LongIndex(v) * LongIndex(stride_[0]);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines transposes of matrix layouts
template <typename Layout>
struct LayoutTranspose;
/// Transpose of row-major is column-major
template <>
struct LayoutTranspose<layout::RowMajor> {
using type = layout::ColumnMajor;
};
/// Transpose of column-major is row-major
template <>
struct LayoutTranspose<layout::ColumnMajor> {
using type = layout::RowMajor;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
| 34,712 | C | 24.675296 | 122 | 0.664813 |
NVIDIA/warp/warp/sim/import_usd.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import re
import numpy as np
import warp as wp
def parse_usd(
source,
builder,
default_density=1.0e3,
only_load_enabled_rigid_bodies=False,
only_load_enabled_joints=True,
contact_ke=1e5,
contact_kd=250.0,
contact_kf=500.0,
contact_ka=0.0,
contact_mu=0.6,
contact_restitution=0.0,
contact_thickness=0.0,
joint_limit_ke=100.0,
joint_limit_kd=10.0,
armature=0.0,
invert_rotations=False,
verbose=False,
ignore_paths=None,
):
"""
Parses a Universal Scene Description (USD) stage containing UsdPhysics schema definitions for rigid-body articulations and adds the bodies, shapes and joints to the given ModelBuilder.
The USD description has to be either a path (file name or URL), or an existing USD stage instance that implements the `UsdStage <https://openusd.org/dev/api/class_usd_stage.html>`_ interface.
Args:
source (str | pxr.UsdStage): The file path to the USD file, or an existing USD stage instance.
builder (ModelBuilder): The :class:`ModelBuilder` to add the bodies and joints to.
default_density (float): The default density to use for bodies without a density attribute.
only_load_enabled_rigid_bodies (bool): If True, only rigid bodies which do not have `physics:rigidBodyEnabled` set to False are loaded.
only_load_enabled_joints (bool): If True, only joints which do not have `physics:jointEnabled` set to False are loaded.
contact_ke (float): The default contact stiffness to use, only considered by the Euler integrators.
contact_kd (float): The default contact damping to use, only considered by the Euler integrators.
contact_kf (float): The default friction stiffness to use, only considered by the Euler integrators.
contact_ka (float): The default adhesion distance to use, only considered by the Euler integrators.
        contact_mu (float): The default friction coefficient to use if a shape has no friction coefficient defined.
        contact_restitution (float): The default coefficient of restitution to use if a shape has no coefficient of restitution defined.
contact_thickness (float): The thickness to add to the shape geometry.
joint_limit_ke (float): The default stiffness to use for joint limits, only considered by the Euler integrators.
joint_limit_kd (float): The default damping to use for joint limits, only considered by the Euler integrators.
armature (float): The armature to use for the bodies.
invert_rotations (bool): If True, inverts any rotations defined in the shape transforms.
verbose (bool): If True, print additional information about the parsed USD file.
ignore_paths (List[str]): A list of regular expressions matching prim paths to ignore.
Returns:
dict: Dictionary with the following entries:
.. list-table::
:widths: 25 75
* - "fps"
- USD stage frames per second
* - "duration"
- Difference between end time code and start time code of the USD stage
* - "up_axis"
- Upper-case string of the stage's up axis ("X", "Y", or "Z")
* - "path_shape_map"
- Mapping from prim path (str) of the UsdGeom to the respective shape index in :class:`ModelBuilder`
* - "path_body_map"
- Mapping from prim path (str) of a rigid body prim (e.g. that implements the PhysicsRigidBodyAPI) to the respective body index in :class:`ModelBuilder`
* - "path_shape_scale"
- Mapping from prim path (str) of the UsdGeom to its respective 3D world scale
* - "mass_unit"
- The stage's Kilograms Per Unit (KGPU) definition (1.0 by default)
* - "linear_unit"
- The stage's Meters Per Unit (MPU) definition (1.0 by default)
Note:
This importer is experimental and only supports a subset of the USD Physics schema. Please report any issues you encounter.
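    Example:
        A minimal usage sketch; the file name ``ant.usda`` is a placeholder, and the call
        assumes the stage contains UsdPhysics rigid-body definitions::
            import warp.sim
            builder = warp.sim.ModelBuilder()
            stage_info = parse_usd("ant.usda", builder)
            model = builder.finalize()
            print(stage_info["up_axis"], stage_info["linear_unit"])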
"""
try:
from pxr import Usd, UsdGeom, UsdPhysics
except ImportError as e:
raise ImportError("Failed to import pxr. Please install USD (e.g. via `pip install usd-core`).") from e
if ignore_paths is None:
ignore_paths = []
def get_attribute(prim, name):
if "*" in name:
regex = name.replace("*", ".*")
for attr in prim.GetAttributes():
if re.match(regex, attr.GetName()):
return attr
else:
return prim.GetAttribute(name)
def has_attribute(prim, name):
attr = get_attribute(prim, name)
        return attr is not None and attr.IsValid() and attr.HasAuthoredValue()
def parse_float(prim, name, default=None):
attr = get_attribute(prim, name)
if not attr or not attr.HasAuthoredValue():
return default
val = attr.Get()
if np.isfinite(val):
return val
return default
def parse_quat(prim, name, default=None):
attr = get_attribute(prim, name)
if not attr or not attr.HasAuthoredValue():
return default
val = attr.Get()
if invert_rotations:
quat = wp.quat(*val.imaginary, -val.real)
else:
quat = wp.quat(*val.imaginary, val.real)
l = wp.length(quat)
if np.isfinite(l) and l > 0.0:
return quat
return default
def parse_vec(prim, name, default=None):
attr = get_attribute(prim, name)
if not attr or not attr.HasAuthoredValue():
return default
val = attr.Get()
if np.isfinite(val).all():
return np.array(val, dtype=np.float32)
return default
def parse_generic(prim, name, default=None):
attr = get_attribute(prim, name)
if not attr or not attr.HasAuthoredValue():
return default
return attr.Get()
def str2axis(s: str) -> np.ndarray:
axis = np.zeros(3, dtype=np.float32)
axis["XYZ".index(s.upper())] = 1.0
return axis
if isinstance(source, str):
stage = Usd.Stage.Open(source, Usd.Stage.LoadAll)
else:
stage = source
mass_unit = 1.0
try:
if UsdPhysics.StageHasAuthoredKilogramsPerUnit(stage):
mass_unit = UsdPhysics.GetStageKilogramsPerUnit(stage)
except Exception as e:
if verbose:
print(f"Failed to get mass unit: {e}")
linear_unit = 1.0
try:
if UsdGeom.StageHasAuthoredMetersPerUnit(stage):
linear_unit = UsdGeom.GetStageMetersPerUnit(stage)
except Exception as e:
if verbose:
print(f"Failed to get linear unit: {e}")
def parse_xform(prim):
xform = UsdGeom.Xform(prim)
mat = np.array(xform.GetLocalTransformation(), dtype=np.float32)
if invert_rotations:
rot = wp.quat_from_matrix(wp.mat33(mat[:3, :3].T.flatten()))
else:
rot = wp.quat_from_matrix(wp.mat33(mat[:3, :3].flatten()))
pos = mat[3, :3] * linear_unit
scale = np.ones(3, dtype=np.float32)
for op in xform.GetOrderedXformOps():
if op.GetOpType() == UsdGeom.XformOp.TypeScale:
scale = np.array(op.Get(), dtype=np.float32)
return wp.transform(pos, rot), scale
def parse_axis(prim, type, joint_data, is_angular, axis=None):
# parse joint axis data
schemas = prim.GetAppliedSchemas()
schemas_str = "".join(schemas)
if f"DriveAPI:{type}" not in schemas_str and f"PhysicsLimitAPI:{type}" not in schemas_str:
return
drive_type = parse_generic(prim, f"drive:{type}:physics:type", "force")
if drive_type != "force":
print(f"Warning: only force drive type is supported, ignoring drive:{type} for joint {path}")
return
stiffness = parse_float(prim, f"drive:{type}:physics:stiffness", 0.0)
damping = parse_float(prim, f"drive:{type}:physics:damping", 0.0)
low = parse_float(prim, f"limit:{type}:physics:low")
high = parse_float(prim, f"limit:{type}:physics:high")
target_pos = parse_float(prim, f"drive:{type}:physics:targetPosition")
target_vel = parse_float(prim, f"drive:{type}:physics:targetVelocity")
if is_angular:
stiffness *= mass_unit * linear_unit**2
stiffness = np.deg2rad(stiffness)
damping *= mass_unit * linear_unit**2
damping = np.deg2rad(damping)
if target_pos is not None:
target_pos = np.deg2rad(target_pos)
if target_vel is not None:
target_vel = np.deg2rad(target_vel)
if low is None:
low = joint_data["lowerLimit"]
else:
low = np.deg2rad(low)
if high is None:
high = joint_data["upperLimit"]
else:
high = np.deg2rad(high)
else:
stiffness *= mass_unit
damping *= mass_unit
if target_pos is not None:
target_pos *= linear_unit
if target_vel is not None:
target_vel *= linear_unit
if low is None:
low = joint_data["lowerLimit"]
else:
low *= linear_unit
if high is None:
high = joint_data["upperLimit"]
else:
high *= linear_unit
mode = wp.sim.JOINT_MODE_FORCE
if f"DriveAPI:{type}" in schemas_str:
if target_vel is not None and target_vel != 0.0:
mode = wp.sim.JOINT_MODE_TARGET_VELOCITY
else:
mode = wp.sim.JOINT_MODE_TARGET_POSITION
if low > high:
low = (low + high) / 2
high = low
axis = wp.sim.JointAxis(
axis=(axis or joint_data["axis"]),
limit_lower=low,
limit_upper=high,
action=(target_pos or target_vel or (low + high) / 2),
target_ke=stiffness,
target_kd=damping,
mode=mode,
limit_ke=joint_limit_ke,
limit_kd=joint_limit_kd,
)
if is_angular:
joint_data["angular_axes"].append(axis)
else:
joint_data["linear_axes"].append(axis)
axis_str = "Y"
try:
axis_str = UsdGeom.GetStageUpAxis(stage)
except Exception as e:
if verbose:
print(f"Failed to parse stage up axis: {e}")
upaxis = str2axis(axis_str)
shape_types = {"Cube", "Sphere", "Mesh", "Capsule", "Plane", "Cylinder", "Cone"}
path_body_map = {}
path_shape_map = {}
path_shape_scale = {}
# maps prim path name to its world transform
path_world_poses = {}
# transform from body frame to where the actual joint child frame is
# so that the link's children will use the right parent tf for the joint
prim_joint_xforms = {}
path_collision_filters = set()
no_collision_shapes = set()
body_density = {} # mapping from body ID to defined density
# first find all joints and materials
joint_data = {} # mapping from path of child link to joint USD settings
materials = {} # mapping from material path to material USD settings
joint_parents = set() # paths of joint parents
for prim in stage.Traverse():
type_name = str(prim.GetTypeName())
path = str(prim.GetPath())
# if verbose:
# print(path, type_name)
if type_name.endswith("Joint"):
# the type name can sometimes be "DistancePhysicsJoint" or "PhysicsDistanceJoint" ...
type_name = type_name.replace("Physics", "").replace("Joint", "")
child = str(prim.GetRelationship("physics:body1").GetTargets()[0])
pos0 = parse_vec(prim, "physics:localPos0", np.zeros(3, dtype=np.float32)) * linear_unit
pos1 = parse_vec(prim, "physics:localPos1", np.zeros(3, dtype=np.float32)) * linear_unit
rot0 = parse_quat(prim, "physics:localRot0", wp.quat_identity())
rot1 = parse_quat(prim, "physics:localRot1", wp.quat_identity())
joint_data[child] = {
"type": type_name,
"name": str(prim.GetName()),
"parent_tf": wp.transform(pos0, rot0),
"child_tf": wp.transform(pos1, rot1),
"enabled": parse_generic(prim, "physics:jointEnabled", True),
"collisionEnabled": parse_generic(prim, "physics:collisionEnabled", False),
"excludeFromArticulation": parse_generic(prim, "physics:excludeFromArticulation", False),
"axis": str2axis(parse_generic(prim, "physics:axis", "X")),
"breakForce": parse_float(prim, "physics:breakForce", np.inf),
"breakTorque": parse_float(prim, "physics:breakTorque", np.inf),
"linear_axes": [],
"angular_axes": [],
}
if only_load_enabled_joints and not joint_data[child]["enabled"]:
print("Skipping disabled joint", path)
continue
# parse joint limits
lower = parse_float(prim, "physics:lowerLimit", -np.inf)
upper = parse_float(prim, "physics:upperLimit", np.inf)
if type_name == "Distance":
# if distance is negative the joint is not limited
joint_data[child]["lowerLimit"] = parse_float(prim, "physics:minDistance", -1.0) * linear_unit
joint_data[child]["upperLimit"] = parse_float(prim, "physics:maxDistance", -1.0) * linear_unit
elif type_name == "Prismatic":
joint_data[child]["lowerLimit"] = lower * linear_unit
joint_data[child]["upperLimit"] = upper * linear_unit
else:
joint_data[child]["lowerLimit"] = np.deg2rad(lower) if np.isfinite(lower) else lower
joint_data[child]["upperLimit"] = np.deg2rad(upper) if np.isfinite(upper) else upper
if joint_data[child]["lowerLimit"] > joint_data[child]["upperLimit"]:
joint_data[child]["lowerLimit"] = (
joint_data[child]["lowerLimit"] + joint_data[child]["upperLimit"]
) / 2
joint_data[child]["upperLimit"] = joint_data[child]["lowerLimit"]
parents = prim.GetRelationship("physics:body0").GetTargets()
if len(parents) > 0:
parent_path = str(parents[0])
joint_data[child]["parent"] = parent_path
joint_parents.add(parent_path)
else:
joint_data[child]["parent"] = None
# parse joint drive
parse_axis(prim, "angular", joint_data[child], is_angular=True)
parse_axis(prim, "rotX", joint_data[child], is_angular=True, axis=(1.0, 0.0, 0.0))
parse_axis(prim, "rotY", joint_data[child], is_angular=True, axis=(0.0, 1.0, 0.0))
parse_axis(prim, "rotZ", joint_data[child], is_angular=True, axis=(0.0, 0.0, 1.0))
parse_axis(prim, "linear", joint_data[child], is_angular=False)
parse_axis(prim, "transX", joint_data[child], is_angular=False, axis=(1.0, 0.0, 0.0))
parse_axis(prim, "transY", joint_data[child], is_angular=False, axis=(0.0, 1.0, 0.0))
parse_axis(prim, "transZ", joint_data[child], is_angular=False, axis=(0.0, 0.0, 1.0))
elif type_name == "Material":
material = {}
if has_attribute(prim, "physics:density"):
material["density"] = parse_float(prim, "physics:density") * mass_unit # / (linear_unit**3)
if has_attribute(prim, "physics:restitution"):
material["restitution"] = parse_float(prim, "physics:restitution", contact_restitution)
if has_attribute(prim, "physics:staticFriction"):
material["staticFriction"] = parse_float(prim, "physics:staticFriction", contact_mu)
if has_attribute(prim, "physics:dynamicFriction"):
material["dynamicFriction"] = parse_float(prim, "physics:dynamicFriction", contact_mu)
materials[path] = material
elif type_name == "PhysicsScene":
try:
scene = UsdPhysics.Scene(prim)
g_vec = scene.GetGravityDirectionAttr()
g_mag = scene.GetGravityMagnitudeAttr()
if g_mag.HasAuthoredValue() and np.isfinite(g_mag.Get()):
builder.gravity = -np.abs(g_mag.Get() * linear_unit)
if g_vec.HasAuthoredValue() and np.linalg.norm(g_vec.Get()) > 0.0:
builder.up_vector = np.array(g_vec.Get(), dtype=np.float32)
if np.any(builder.up_vector < 0.0):
builder.up_vector = -builder.up_vector
else:
builder.up_vector = upaxis
except Exception as e:
if verbose:
print(f"Failed to parse physics scene: {e}")
def parse_prim(prim, incoming_xform, incoming_scale, parent_body: int = -1):
nonlocal builder
nonlocal joint_data
nonlocal path_body_map
nonlocal path_shape_map
nonlocal path_shape_scale
nonlocal path_world_poses
nonlocal prim_joint_xforms
nonlocal path_collision_filters
nonlocal no_collision_shapes
nonlocal body_density
path = str(prim.GetPath())
for pattern in ignore_paths:
if re.match(pattern, path):
return
type_name = str(prim.GetTypeName())
if type_name.endswith("Joint") or type_name.endswith("Light") or type_name.endswith("Material"):
return
if verbose:
print(f"parse_prim {prim.GetPath()} ({type_name})")
if type_name == "PhysicsScene":
# in case the PhysicsScene has bodies as children...
for child in prim.GetChildren():
parse_prim(child, incoming_xform, incoming_scale, parent_body)
schemas = set(prim.GetAppliedSchemas())
children_refs = prim.GetChildren()
prim_joint_xforms[path] = wp.transform()
local_xform, scale = parse_xform(prim)
scale = incoming_scale * scale
xform = wp.mul(incoming_xform, local_xform)
path_world_poses[path] = xform
geo_tf = local_xform
body_id = parent_body
is_rigid_body = "PhysicsRigidBodyAPI" in schemas and parent_body == -1
create_rigid_body = is_rigid_body or path in joint_parents
if create_rigid_body:
body_id = builder.add_body(
origin=xform,
name=prim.GetName(),
armature=armature,
)
path_body_map[path] = body_id
body_density[body_id] = 0.0
parent_body = body_id
geo_tf = wp.transform()
# set up joints between rigid bodies after the children have been added
if path in joint_data:
joint = joint_data[path]
joint_params = {
"child": body_id,
"linear_axes": joint["linear_axes"],
"angular_axes": joint["angular_axes"],
"name": joint["name"],
"enabled": joint["enabled"],
"parent_xform": joint["parent_tf"],
"child_xform": joint["child_tf"],
"armature": armature,
}
parent_path = joint["parent"]
if parent_path is None:
joint_params["parent"] = -1
parent_tf = wp.transform()
else:
joint_params["parent"] = path_body_map[parent_path]
parent_tf = path_world_poses[parent_path]
# the joint to which we are connected will transform this body already
geo_tf = wp.transform()
if verbose:
print(f"Adding joint {joint['name']} between {joint['parent']} and {path}")
print(" parent_xform", joint["parent_tf"])
print(" child_xform ", joint["child_tf"])
print(" parent_tf ", parent_tf)
print(f" geo_tf at {path} = {geo_tf} (xform was {xform})")
if joint["type"] == "Revolute":
joint_params["joint_type"] = wp.sim.JOINT_REVOLUTE
if len(joint_params["angular_axes"]) == 0:
joint_params["angular_axes"].append(
wp.sim.JointAxis(
joint["axis"],
limit_lower=joint["lowerLimit"],
limit_upper=joint["upperLimit"],
limit_ke=joint_limit_ke,
limit_kd=joint_limit_kd,
)
)
elif joint["type"] == "Prismatic":
joint_params["joint_type"] = wp.sim.JOINT_PRISMATIC
if len(joint_params["linear_axes"]) == 0:
joint_params["linear_axes"].append(
wp.sim.JointAxis(
joint["axis"],
limit_lower=joint["lowerLimit"],
limit_upper=joint["upperLimit"],
limit_ke=joint_limit_ke,
limit_kd=joint_limit_kd,
)
)
elif joint["type"] == "Spherical":
joint_params["joint_type"] = wp.sim.JOINT_BALL
elif joint["type"] == "Fixed":
joint_params["joint_type"] = wp.sim.JOINT_FIXED
elif joint["type"] == "Distance":
joint_params["joint_type"] = wp.sim.JOINT_DISTANCE
# we have to add a dummy linear X axis to define the joint limits
joint_params["linear_axes"].append(
wp.sim.JointAxis(
(1.0, 0.0, 0.0),
limit_lower=joint["lowerLimit"],
limit_upper=joint["upperLimit"],
limit_ke=joint_limit_ke,
limit_kd=joint_limit_kd,
)
)
elif joint["type"] == "":
joint_params["joint_type"] = wp.sim.JOINT_D6
else:
print(f"Warning: unsupported joint type {joint['type']} for {path}")
builder.add_joint(**joint_params)
elif is_rigid_body:
builder.add_joint_free(child=body_id)
# free joint; we set joint_q/qd, not body_q/qd since eval_fk is used after model creation
builder.joint_q[-4:] = xform.q
builder.joint_q[-7:-4] = xform.p
linear_vel = parse_vec(prim, "physics:velocity", np.zeros(3, dtype=np.float32)) * linear_unit
angular_vel = parse_vec(prim, "physics:angularVelocity", np.zeros(3, dtype=np.float32)) * linear_unit
builder.joint_qd[-6:-3] = angular_vel
builder.joint_qd[-3:] = linear_vel
if verbose:
print(f"added {type_name} body {body_id} ({path}) at {xform}")
density = None
material = None
if prim.HasRelationship("material:binding:physics"):
other_paths = prim.GetRelationship("material:binding:physics").GetTargets()
if len(other_paths) > 0:
material = materials[str(other_paths[0])]
if material is not None:
if "density" in material:
density = material["density"]
if has_attribute(prim, "physics:density"):
d = parse_float(prim, "physics:density")
density = d * mass_unit # / (linear_unit**3)
# assert prim.GetAttribute('orientation').Get() == "rightHanded", "Only right-handed orientations are supported."
enabled = parse_generic(prim, "physics:rigidBodyEnabled", True)
if only_load_enabled_rigid_bodies and not enabled:
if verbose:
print("Skipping disabled rigid body", path)
return
mass = parse_float(prim, "physics:mass")
if is_rigid_body:
if density is None:
density = default_density
body_density[body_id] = density
elif density is None:
if body_id >= 0:
density = body_density[body_id]
else:
density = 0.0
com = parse_vec(prim, "physics:centerOfMass", np.zeros(3, dtype=np.float32))
i_diag = parse_vec(prim, "physics:diagonalInertia", np.zeros(3, dtype=np.float32))
i_rot = parse_quat(prim, "physics:principalAxes", wp.quat_identity())
# parse children
if type_name == "Xform":
if prim.IsInstance():
proto = prim.GetPrototype()
for child in proto.GetChildren():
parse_prim(child, xform, scale, parent_body)
else:
for child in children_refs:
parse_prim(child, xform, scale, parent_body)
elif type_name == "Scope":
for child in children_refs:
parse_prim(child, incoming_xform, incoming_scale, parent_body)
elif type_name in shape_types:
# parse shapes
shape_params = {
"ke": contact_ke,
"kd": contact_kd,
"kf": contact_kf,
"ka": contact_ka,
"mu": contact_mu,
"restitution": contact_restitution,
}
if material is not None:
if "restitution" in material:
shape_params["restitution"] = material["restitution"]
if "dynamicFriction" in material:
shape_params["mu"] = material["dynamicFriction"]
if has_attribute(prim, "doubleSided") and not prim.GetAttribute("doubleSided").Get():
print(f"Warning: treating {path} as double-sided because single-sided collisions are not supported.")
if type_name == "Cube":
size = parse_float(prim, "size", 2.0)
if has_attribute(prim, "extents"):
extents = parse_vec(prim, "extents") * scale
# TODO position geom at extents center?
# geo_pos = 0.5 * (extents[0] + extents[1])
extents = extents[1] - extents[0]
else:
extents = scale * size
shape_id = builder.add_shape_box(
body_id,
geo_tf.p,
geo_tf.q,
hx=extents[0] / 2,
hy=extents[1] / 2,
hz=extents[2] / 2,
density=density,
thickness=contact_thickness,
**shape_params,
)
elif type_name == "Sphere":
if not (scale[0] == scale[1] == scale[2]):
print("Warning: Non-uniform scaling of spheres is not supported.")
if has_attribute(prim, "extents"):
extents = parse_vec(prim, "extents") * scale
# TODO position geom at extents center?
# geo_pos = 0.5 * (extents[0] + extents[1])
extents = extents[1] - extents[0]
if not (extents[0] == extents[1] == extents[2]):
print("Warning: Non-uniform extents of spheres are not supported.")
radius = extents[0]
else:
radius = parse_float(prim, "radius", 1.0) * scale[0]
shape_id = builder.add_shape_sphere(
body_id, geo_tf.p, geo_tf.q, radius, density=density, **shape_params
)
elif type_name == "Plane":
normal_str = parse_generic(prim, "axis", "Z").upper()
geo_rot = geo_tf.q
if normal_str != "Y":
normal = str2axis(normal_str)
c = np.cross(normal, (0.0, 1.0, 0.0))
angle = np.arcsin(np.linalg.norm(c))
axis = c / np.linalg.norm(c)
geo_rot = wp.mul(geo_rot, wp.quat_from_axis_angle(axis, angle))
width = parse_float(prim, "width", 0.0) * scale[0]
length = parse_float(prim, "length", 0.0) * scale[1]
shape_id = builder.add_shape_plane(
body=body_id,
pos=geo_tf.p,
rot=geo_rot,
width=width,
length=length,
thickness=contact_thickness,
**shape_params,
)
elif type_name == "Capsule":
axis_str = parse_generic(prim, "axis", "Z").upper()
radius = parse_float(prim, "radius", 0.5) * scale[0]
half_height = parse_float(prim, "height", 2.0) / 2 * scale[1]
assert not has_attribute(prim, "extents"), "Capsule extents are not supported."
shape_id = builder.add_shape_capsule(
body_id,
geo_tf.p,
geo_tf.q,
radius,
half_height,
density=density,
up_axis="XYZ".index(axis_str),
**shape_params,
)
elif type_name == "Cylinder":
axis_str = parse_generic(prim, "axis", "Z").upper()
radius = parse_float(prim, "radius", 0.5) * scale[0]
half_height = parse_float(prim, "height", 2.0) / 2 * scale[1]
assert not has_attribute(prim, "extents"), "Cylinder extents are not supported."
shape_id = builder.add_shape_cylinder(
body_id,
geo_tf.p,
geo_tf.q,
radius,
half_height,
density=density,
up_axis="XYZ".index(axis_str),
**shape_params,
)
elif type_name == "Cone":
axis_str = parse_generic(prim, "axis", "Z").upper()
radius = parse_float(prim, "radius", 0.5) * scale[0]
half_height = parse_float(prim, "height", 2.0) / 2 * scale[1]
assert not has_attribute(prim, "extents"), "Cone extents are not supported."
shape_id = builder.add_shape_cone(
body_id,
geo_tf.p,
geo_tf.q,
radius,
half_height,
density=density,
up_axis="XYZ".index(axis_str),
**shape_params,
)
elif type_name == "Mesh":
mesh = UsdGeom.Mesh(prim)
points = np.array(mesh.GetPointsAttr().Get(), dtype=np.float32)
                indices = np.array(mesh.GetFaceVertexIndicesAttr().Get(), dtype=np.int32)
counts = mesh.GetFaceVertexCountsAttr().Get()
faces = []
face_id = 0
for count in counts:
if count == 3:
faces.append(indices[face_id : face_id + 3])
elif count == 4:
faces.append(indices[face_id : face_id + 3])
faces.append(indices[[face_id, face_id + 2, face_id + 3]])
                    else:
                        # assert False, f"Error while parsing USD mesh {path}: encountered polygon with {count} vertices, but only triangles and quads are supported."
                        # skip unsupported polygons but still advance face_id so the
                        # remaining faces keep indexing `indices` at the correct offset
                        face_id += count
                        continue
                    face_id += count
m = wp.sim.Mesh(points, np.array(faces, dtype=np.int32).flatten())
shape_id = builder.add_shape_mesh(
body_id,
geo_tf.p,
geo_tf.q,
scale=scale,
mesh=m,
density=density,
thickness=contact_thickness,
**shape_params,
)
else:
print(f"Warning: Unsupported geometry type {type_name} at {path}.")
return
path_body_map[path] = body_id
path_shape_map[path] = shape_id
path_shape_scale[path] = scale
if prim.HasRelationship("physics:filteredPairs"):
other_paths = prim.GetRelationship("physics:filteredPairs").GetTargets()
for other_path in other_paths:
path_collision_filters.add((path, str(other_path)))
if "PhysicsCollisionAPI" not in schemas or not parse_generic(prim, "physics:collisionEnabled", True):
no_collision_shapes.add(shape_id)
else:
print(f"Warning: encountered unsupported prim type {type_name}")
# update mass properties of rigid bodies in cases where properties are defined with higher precedence
if body_id >= 0:
com = parse_vec(prim, "physics:centerOfMass")
if com is not None:
# overwrite COM
builder.body_com[body_id] = com * scale
if mass is not None and not (is_rigid_body and mass == 0.0):
mass_ratio = mass / builder.body_mass[body_id]
# mass has precedence over density, so we overwrite the mass computed from density
builder.body_mass[body_id] = mass * mass_unit
if mass > 0.0:
builder.body_inv_mass[body_id] = 1.0 / builder.body_mass[body_id]
else:
builder.body_inv_mass[body_id] = 0.0
# update inertia
builder.body_inertia[body_id] *= mass_ratio
if np.array(builder.body_inertia[body_id]).any():
builder.body_inv_inertia[body_id] = wp.inverse(builder.body_inertia[body_id])
else:
builder.body_inv_inertia[body_id] = wp.mat33(*np.zeros((3, 3), dtype=np.float32))
if np.linalg.norm(i_diag) > 0.0:
rot = np.array(wp.quat_to_matrix(i_rot), dtype=np.float32).reshape(3, 3)
inertia = rot @ np.diag(i_diag) @ rot.T
builder.body_inertia[body_id] = inertia
if inertia.any():
builder.body_inv_inertia[body_id] = wp.inverse(wp.mat33(*inertia))
else:
builder.body_inv_inertia[body_id] = wp.mat33(*np.zeros((3, 3), dtype=np.float32))
parse_prim(
stage.GetDefaultPrim(), incoming_xform=wp.transform(), incoming_scale=np.ones(3, dtype=np.float32) * linear_unit
)
shape_count = len(builder.shape_geo_type)
# apply collision filters now that we have added all shapes
for path1, path2 in path_collision_filters:
shape1 = path_shape_map[path1]
shape2 = path_shape_map[path2]
builder.shape_collision_filter_pairs.add((shape1, shape2))
# apply collision filters to all shapes that have no collision
for shape_id in no_collision_shapes:
for other_shape_id in range(shape_count):
if other_shape_id != shape_id:
builder.shape_collision_filter_pairs.add((shape_id, other_shape_id))
# return stage parameters
return {
"fps": stage.GetFramesPerSecond(),
"duration": stage.GetEndTimeCode() - stage.GetStartTimeCode(),
"up_axis": UsdGeom.GetStageUpAxis(stage).upper(),
"path_shape_map": path_shape_map,
"path_body_map": path_body_map,
"path_shape_scale": path_shape_scale,
"mass_unit": mass_unit,
"linear_unit": linear_unit,
}
def resolve_usd_from_url(url: str, target_folder_name: str = None, export_usda: bool = False):
"""
    Downloads a USD file from a URL and resolves its references to other USD files, downloading them into the given target folder as well.
Args:
url (str): URL to the USD file.
target_folder_name (str): Target folder name. If None, a timestamped folder will be created in the current directory.
export_usda (bool): If True, converts each downloaded USD file to USDA and saves the additional USDA file in the target folder with the same base name as the original USD file.
Returns:
str: File path to the downloaded USD file.
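    Example:
        A minimal sketch; the URL below is a placeholder rather than a real asset::
            local_path = resolve_usd_from_url("https://example.com/assets/robot.usd", target_folder_name="robot_usd")
            # local_path points to the downloaded file; referenced USD files are stored next to it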
"""
import datetime
import os
import requests
try:
from pxr import Usd
except ImportError as e:
raise ImportError("Failed to import pxr. Please install USD (e.g. via `pip install usd-core`).") from e
response = requests.get(url, allow_redirects=True)
if response.status_code != 200:
raise RuntimeError(f"Failed to download USD file. Status code: {response.status_code}")
file = response.content
dot = os.path.extsep
base = os.path.basename(url)
url_folder = os.path.dirname(url)
base_name = dot.join(base.split(dot)[:-1])
if target_folder_name is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
target_folder_name = os.path.join(".usd_cache", f"{base_name}_{timestamp}")
os.makedirs(target_folder_name, exist_ok=True)
target_filename = os.path.join(target_folder_name, base)
with open(target_filename, "wb") as f:
f.write(file)
stage = Usd.Stage.Open(target_filename, Usd.Stage.LoadNone)
stage_str = stage.GetRootLayer().ExportToString()
print(f"Downloaded USD file to {target_filename}.")
if export_usda:
usda_filename = os.path.join(target_folder_name, base_name + ".usda")
with open(usda_filename, "w") as f:
f.write(stage_str)
print(f"Exported USDA file to {usda_filename}.")
# parse referenced USD files like `references = @./franka_collisions.usd@`
downloaded = set()
for match in re.finditer(r"references.=.@(.*?)@", stage_str):
refname = match.group(1)
if refname.startswith("./"):
refname = refname[2:]
if refname in downloaded:
continue
try:
response = requests.get(f"{url_folder}/{refname}", allow_redirects=True)
if response.status_code != 200:
print(f"Failed to download reference {refname}. Status code: {response.status_code}")
continue
file = response.content
refdir = os.path.dirname(refname)
if refdir:
os.makedirs(os.path.join(target_folder_name, refdir), exist_ok=True)
ref_filename = os.path.join(target_folder_name, refname)
if not os.path.exists(ref_filename):
with open(ref_filename, "wb") as f:
f.write(file)
downloaded.add(refname)
print(f"Downloaded USD reference {refname} to {ref_filename}.")
if export_usda:
ref_stage = Usd.Stage.Open(ref_filename, Usd.Stage.LoadNone)
ref_stage_str = ref_stage.GetRootLayer().ExportToString()
base = os.path.basename(ref_filename)
base_name = dot.join(base.split(dot)[:-1])
usda_filename = os.path.join(target_folder_name, base_name + ".usda")
with open(usda_filename, "w") as f:
f.write(ref_stage_str)
print(f"Exported USDA file to {usda_filename}.")
except Exception:
print(f"Failed to download {refname}.")
return target_filename
| 40,498 | Python | 44.606982 | 195 | 0.545953 |
NVIDIA/warp/warp/sim/integrator_featherstone.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
from .articulation import (
compute_2d_rotational_dofs,
compute_3d_rotational_dofs,
eval_fk,
)
from .integrator import Integrator
from .integrator_euler import (
eval_bending_forces,
eval_joint_force,
eval_muscle_forces,
eval_particle_body_contact_forces,
eval_particle_forces,
eval_particle_ground_contact_forces,
eval_rigid_contacts,
eval_spring_forces,
eval_tetrahedral_forces,
eval_triangle_contact_forces,
eval_triangle_forces,
)
from .model import Control, Model, State
# Frank & Park definition 3.20, pg 100
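# transform_twist applies the adjoint of transform t to a spatial twist (angular, linear):
# the angular part is rotated by t's rotation, and the linear part is rotated and then
# picks up the cross product of t's translation with the rotated angular part.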
@wp.func
def transform_twist(t: wp.transform, x: wp.spatial_vector):
q = wp.transform_get_rotation(t)
p = wp.transform_get_translation(t)
w = wp.spatial_top(x)
v = wp.spatial_bottom(x)
w = wp.quat_rotate(q, w)
v = wp.quat_rotate(q, v) + wp.cross(p, w)
return wp.spatial_vector(w, v)
@wp.func
def transform_wrench(t: wp.transform, x: wp.spatial_vector):
q = wp.transform_get_rotation(t)
p = wp.transform_get_translation(t)
w = wp.spatial_top(x)
v = wp.spatial_bottom(x)
v = wp.quat_rotate(q, v)
w = wp.quat_rotate(q, w) + wp.cross(p, v)
return wp.spatial_vector(w, v)
@wp.func
def spatial_adjoint(R: wp.mat33, S: wp.mat33):
# T = [R 0]
# [S R]
# fmt: off
return wp.spatial_matrix(
R[0, 0], R[0, 1], R[0, 2], 0.0, 0.0, 0.0,
R[1, 0], R[1, 1], R[1, 2], 0.0, 0.0, 0.0,
R[2, 0], R[2, 1], R[2, 2], 0.0, 0.0, 0.0,
S[0, 0], S[0, 1], S[0, 2], R[0, 0], R[0, 1], R[0, 2],
S[1, 0], S[1, 1], S[1, 2], R[1, 0], R[1, 1], R[1, 2],
S[2, 0], S[2, 1], S[2, 2], R[2, 0], R[2, 1], R[2, 2],
)
# fmt: on
@wp.kernel
def compute_spatial_inertia(
body_inertia: wp.array(dtype=wp.mat33),
body_mass: wp.array(dtype=float),
# outputs
body_I_m: wp.array(dtype=wp.spatial_matrix),
):
tid = wp.tid()
I = body_inertia[tid]
m = body_mass[tid]
# fmt: off
body_I_m[tid] = wp.spatial_matrix(
I[0, 0], I[0, 1], I[0, 2], 0.0, 0.0, 0.0,
I[1, 0], I[1, 1], I[1, 2], 0.0, 0.0, 0.0,
I[2, 0], I[2, 1], I[2, 2], 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, m, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, m, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, m,
)
# fmt: on
@wp.kernel
def compute_com_transforms(
body_com: wp.array(dtype=wp.vec3),
# outputs
body_X_com: wp.array(dtype=wp.transform),
):
tid = wp.tid()
com = body_com[tid]
body_X_com[tid] = wp.transform(com, wp.quat_identity())
# computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates), Frank & Park, section 8.2.3, pg 290
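# i.e. it re-expresses a spatial inertia tensor in a new frame: T = spatial_adjoint(R, skew(p) @ R)
# is built from t^-1, and the result is T^T * I * T.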
@wp.func
def spatial_transform_inertia(t: wp.transform, I: wp.spatial_matrix):
t_inv = wp.transform_inverse(t)
q = wp.transform_get_rotation(t_inv)
p = wp.transform_get_translation(t_inv)
r1 = wp.quat_rotate(q, wp.vec3(1.0, 0.0, 0.0))
r2 = wp.quat_rotate(q, wp.vec3(0.0, 1.0, 0.0))
r3 = wp.quat_rotate(q, wp.vec3(0.0, 0.0, 1.0))
R = wp.mat33(r1, r2, r3)
S = wp.skew(p) @ R
T = spatial_adjoint(R, S)
return wp.mul(wp.mul(wp.transpose(T), I), T)
# compute transform across a joint
@wp.func
def jcalc_transform(
type: int,
joint_axis: wp.array(dtype=wp.vec3),
axis_start: int,
lin_axis_count: int,
ang_axis_count: int,
joint_q: wp.array(dtype=float),
start: int,
):
if type == wp.sim.JOINT_PRISMATIC:
q = joint_q[start]
axis = joint_axis[axis_start]
X_jc = wp.transform(axis * q, wp.quat_identity())
return X_jc
if type == wp.sim.JOINT_REVOLUTE:
q = joint_q[start]
axis = joint_axis[axis_start]
X_jc = wp.transform(wp.vec3(), wp.quat_from_axis_angle(axis, q))
return X_jc
if type == wp.sim.JOINT_BALL:
qx = joint_q[start + 0]
qy = joint_q[start + 1]
qz = joint_q[start + 2]
qw = joint_q[start + 3]
X_jc = wp.transform(wp.vec3(), wp.quat(qx, qy, qz, qw))
return X_jc
if type == wp.sim.JOINT_FIXED:
X_jc = wp.transform_identity()
return X_jc
if type == wp.sim.JOINT_FREE or type == wp.sim.JOINT_DISTANCE:
px = joint_q[start + 0]
py = joint_q[start + 1]
pz = joint_q[start + 2]
qx = joint_q[start + 3]
qy = joint_q[start + 4]
qz = joint_q[start + 5]
qw = joint_q[start + 6]
X_jc = wp.transform(wp.vec3(px, py, pz), wp.quat(qx, qy, qz, qw))
return X_jc
if type == wp.sim.JOINT_COMPOUND:
rot, _ = compute_3d_rotational_dofs(
joint_axis[axis_start],
joint_axis[axis_start + 1],
joint_axis[axis_start + 2],
joint_q[start + 0],
joint_q[start + 1],
joint_q[start + 2],
0.0,
0.0,
0.0,
)
X_jc = wp.transform(wp.vec3(), rot)
return X_jc
if type == wp.sim.JOINT_UNIVERSAL:
rot, _ = compute_2d_rotational_dofs(
joint_axis[axis_start],
joint_axis[axis_start + 1],
joint_q[start + 0],
joint_q[start + 1],
0.0,
0.0,
)
X_jc = wp.transform(wp.vec3(), rot)
return X_jc
if type == wp.sim.JOINT_D6:
pos = wp.vec3(0.0)
rot = wp.quat_identity()
# unroll for loop to ensure joint actions remain differentiable
# (since differentiating through a for loop that updates a local variable is not supported)
if lin_axis_count > 0:
axis = joint_axis[axis_start + 0]
pos += axis * joint_q[start + 0]
if lin_axis_count > 1:
axis = joint_axis[axis_start + 1]
pos += axis * joint_q[start + 1]
if lin_axis_count > 2:
axis = joint_axis[axis_start + 2]
pos += axis * joint_q[start + 2]
ia = axis_start + lin_axis_count
iq = start + lin_axis_count
if ang_axis_count == 1:
axis = joint_axis[ia]
rot = wp.quat_from_axis_angle(axis, joint_q[iq])
if ang_axis_count == 2:
rot, _ = compute_2d_rotational_dofs(
joint_axis[ia + 0],
joint_axis[ia + 1],
joint_q[iq + 0],
joint_q[iq + 1],
0.0,
0.0,
)
if ang_axis_count == 3:
rot, _ = compute_3d_rotational_dofs(
joint_axis[ia + 0],
joint_axis[ia + 1],
joint_axis[ia + 2],
joint_q[iq + 0],
joint_q[iq + 1],
joint_q[iq + 2],
0.0,
0.0,
0.0,
)
X_jc = wp.transform(pos, rot)
return X_jc
# default case
return wp.transform_identity()
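# Illustrative example (hypothetical values, not used by the solver): for a revolute joint with
# axis (0.0, 0.0, 1.0) and joint_q[start] = pi/2, jcalc_transform() returns
# wp.transform(wp.vec3(), wp.quat_from_axis_angle(wp.vec3(0.0, 0.0, 1.0), pi/2)),
# i.e. a pure 90-degree rotation about the joint axis with no translation.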
# compute motion subspace and velocity for a joint
@wp.func
def jcalc_motion(
type: int,
joint_axis: wp.array(dtype=wp.vec3),
axis_start: int,
lin_axis_count: int,
ang_axis_count: int,
X_sc: wp.transform,
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
q_start: int,
qd_start: int,
# outputs
joint_S_s: wp.array(dtype=wp.spatial_vector),
):
if type == wp.sim.JOINT_PRISMATIC:
axis = joint_axis[axis_start]
S_s = transform_twist(X_sc, wp.spatial_vector(wp.vec3(), axis))
v_j_s = S_s * joint_qd[qd_start]
joint_S_s[qd_start] = S_s
return v_j_s
if type == wp.sim.JOINT_REVOLUTE:
axis = joint_axis[axis_start]
S_s = transform_twist(X_sc, wp.spatial_vector(axis, wp.vec3()))
v_j_s = S_s * joint_qd[qd_start]
joint_S_s[qd_start] = S_s
return v_j_s
if type == wp.sim.JOINT_UNIVERSAL:
axis_0 = joint_axis[axis_start + 0]
axis_1 = joint_axis[axis_start + 1]
q_off = wp.quat_from_matrix(wp.mat33(axis_0, axis_1, wp.cross(axis_0, axis_1)))
local_0 = wp.quat_rotate(q_off, wp.vec3(1.0, 0.0, 0.0))
local_1 = wp.quat_rotate(q_off, wp.vec3(0.0, 1.0, 0.0))
axis_0 = local_0
q_0 = wp.quat_from_axis_angle(axis_0, joint_q[q_start + 0])
axis_1 = wp.quat_rotate(q_0, local_1)
S_0 = transform_twist(X_sc, wp.spatial_vector(axis_0, wp.vec3()))
S_1 = transform_twist(X_sc, wp.spatial_vector(axis_1, wp.vec3()))
joint_S_s[qd_start + 0] = S_0
joint_S_s[qd_start + 1] = S_1
return S_0 * joint_qd[qd_start + 0] + S_1 * joint_qd[qd_start + 1]
if type == wp.sim.JOINT_COMPOUND:
axis_0 = joint_axis[axis_start + 0]
axis_1 = joint_axis[axis_start + 1]
axis_2 = joint_axis[axis_start + 2]
q_off = wp.quat_from_matrix(wp.mat33(axis_0, axis_1, axis_2))
local_0 = wp.quat_rotate(q_off, wp.vec3(1.0, 0.0, 0.0))
local_1 = wp.quat_rotate(q_off, wp.vec3(0.0, 1.0, 0.0))
local_2 = wp.quat_rotate(q_off, wp.vec3(0.0, 0.0, 1.0))
axis_0 = local_0
q_0 = wp.quat_from_axis_angle(axis_0, joint_q[q_start + 0])
axis_1 = wp.quat_rotate(q_0, local_1)
q_1 = wp.quat_from_axis_angle(axis_1, joint_q[q_start + 1])
axis_2 = wp.quat_rotate(q_1 * q_0, local_2)
S_0 = transform_twist(X_sc, wp.spatial_vector(axis_0, wp.vec3()))
S_1 = transform_twist(X_sc, wp.spatial_vector(axis_1, wp.vec3()))
S_2 = transform_twist(X_sc, wp.spatial_vector(axis_2, wp.vec3()))
joint_S_s[qd_start + 0] = S_0
joint_S_s[qd_start + 1] = S_1
joint_S_s[qd_start + 2] = S_2
return S_0 * joint_qd[qd_start + 0] + S_1 * joint_qd[qd_start + 1] + S_2 * joint_qd[qd_start + 2]
if type == wp.sim.JOINT_D6:
v_j_s = wp.spatial_vector()
if lin_axis_count > 0:
axis = joint_axis[axis_start + 0]
S_s = transform_twist(X_sc, wp.spatial_vector(wp.vec3(), axis))
v_j_s += S_s * joint_qd[qd_start + 0]
joint_S_s[qd_start + 0] = S_s
if lin_axis_count > 1:
axis = joint_axis[axis_start + 1]
S_s = transform_twist(X_sc, wp.spatial_vector(wp.vec3(), axis))
v_j_s += S_s * joint_qd[qd_start + 1]
joint_S_s[qd_start + 1] = S_s
if lin_axis_count > 2:
axis = joint_axis[axis_start + 2]
S_s = transform_twist(X_sc, wp.spatial_vector(wp.vec3(), axis))
v_j_s += S_s * joint_qd[qd_start + 2]
joint_S_s[qd_start + 2] = S_s
if ang_axis_count > 0:
axis = joint_axis[axis_start + lin_axis_count + 0]
S_s = transform_twist(X_sc, wp.spatial_vector(axis, wp.vec3()))
v_j_s += S_s * joint_qd[qd_start + lin_axis_count + 0]
joint_S_s[qd_start + lin_axis_count + 0] = S_s
if ang_axis_count > 1:
axis = joint_axis[axis_start + lin_axis_count + 1]
S_s = transform_twist(X_sc, wp.spatial_vector(axis, wp.vec3()))
v_j_s += S_s * joint_qd[qd_start + lin_axis_count + 1]
joint_S_s[qd_start + lin_axis_count + 1] = S_s
if ang_axis_count > 2:
axis = joint_axis[axis_start + lin_axis_count + 2]
S_s = transform_twist(X_sc, wp.spatial_vector(axis, wp.vec3()))
v_j_s += S_s * joint_qd[qd_start + lin_axis_count + 2]
joint_S_s[qd_start + lin_axis_count + 2] = S_s
return v_j_s
if type == wp.sim.JOINT_BALL:
S_0 = transform_twist(X_sc, wp.spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
S_1 = transform_twist(X_sc, wp.spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0))
S_2 = transform_twist(X_sc, wp.spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0))
joint_S_s[qd_start + 0] = S_0
joint_S_s[qd_start + 1] = S_1
joint_S_s[qd_start + 2] = S_2
return S_0 * joint_qd[qd_start + 0] + S_1 * joint_qd[qd_start + 1] + S_2 * joint_qd[qd_start + 2]
if type == wp.sim.JOINT_FIXED:
return wp.spatial_vector()
if type == wp.sim.JOINT_FREE or type == wp.sim.JOINT_DISTANCE:
v_j_s = transform_twist(
X_sc,
wp.spatial_vector(
joint_qd[qd_start + 0],
joint_qd[qd_start + 1],
joint_qd[qd_start + 2],
joint_qd[qd_start + 3],
joint_qd[qd_start + 4],
joint_qd[qd_start + 5],
),
)
joint_S_s[qd_start + 0] = transform_twist(X_sc, wp.spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
joint_S_s[qd_start + 1] = transform_twist(X_sc, wp.spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0))
joint_S_s[qd_start + 2] = transform_twist(X_sc, wp.spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0))
joint_S_s[qd_start + 3] = transform_twist(X_sc, wp.spatial_vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
joint_S_s[qd_start + 4] = transform_twist(X_sc, wp.spatial_vector(0.0, 0.0, 0.0, 0.0, 1.0, 0.0))
joint_S_s[qd_start + 5] = transform_twist(X_sc, wp.spatial_vector(0.0, 0.0, 0.0, 0.0, 0.0, 1.0))
return v_j_s
wp.printf("jcalc_motion not implemented for joint type %d\n", type)
# default case
return wp.spatial_vector()
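# Convention used by jcalc_motion() above: each stored joint_S_s[dof] is the unit twist
# (motion-subspace column) of that degree of freedom mapped to the spatial frame via
# transform_twist(X_sc, ...), so the joint's spatial velocity is sum_i S_i * joint_qd[i].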
# computes joint space forces/torques in tau
@wp.func
def jcalc_tau(
type: int,
joint_target_ke: wp.array(dtype=float),
joint_target_kd: wp.array(dtype=float),
joint_limit_ke: wp.array(dtype=float),
joint_limit_kd: wp.array(dtype=float),
joint_S_s: wp.array(dtype=wp.spatial_vector),
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
joint_act: wp.array(dtype=float),
joint_axis_mode: wp.array(dtype=int),
joint_limit_lower: wp.array(dtype=float),
joint_limit_upper: wp.array(dtype=float),
coord_start: int,
dof_start: int,
axis_start: int,
lin_axis_count: int,
ang_axis_count: int,
body_f_s: wp.spatial_vector,
# outputs
tau: wp.array(dtype=float),
):
if type == wp.sim.JOINT_PRISMATIC or type == wp.sim.JOINT_REVOLUTE:
S_s = joint_S_s[dof_start]
q = joint_q[coord_start]
qd = joint_qd[dof_start]
act = joint_act[axis_start]
lower = joint_limit_lower[axis_start]
upper = joint_limit_upper[axis_start]
limit_ke = joint_limit_ke[axis_start]
limit_kd = joint_limit_kd[axis_start]
target_ke = joint_target_ke[axis_start]
target_kd = joint_target_kd[axis_start]
mode = joint_axis_mode[axis_start]
# total torque / force on the joint
t = -wp.dot(S_s, body_f_s) + eval_joint_force(
q, qd, act, target_ke, target_kd, lower, upper, limit_ke, limit_kd, mode
)
tau[dof_start] = t
return
if type == wp.sim.JOINT_BALL:
# target_ke = joint_target_ke[axis_start]
# target_kd = joint_target_kd[axis_start]
for i in range(3):
S_s = joint_S_s[dof_start + i]
# w = joint_qd[dof_start + i]
# r = joint_q[coord_start + i]
tau[dof_start + i] = -wp.dot(S_s, body_f_s) # - w * target_kd - r * target_ke
return
if type == wp.sim.JOINT_FREE or type == wp.sim.JOINT_DISTANCE:
for i in range(6):
S_s = joint_S_s[dof_start + i]
tau[dof_start + i] = -wp.dot(S_s, body_f_s)
return
if type == wp.sim.JOINT_COMPOUND or type == wp.sim.JOINT_UNIVERSAL or type == wp.sim.JOINT_D6:
axis_count = lin_axis_count + ang_axis_count
for i in range(axis_count):
S_s = joint_S_s[dof_start + i]
q = joint_q[coord_start + i]
qd = joint_qd[dof_start + i]
act = joint_act[axis_start + i]
lower = joint_limit_lower[axis_start + i]
upper = joint_limit_upper[axis_start + i]
limit_ke = joint_limit_ke[axis_start + i]
limit_kd = joint_limit_kd[axis_start + i]
target_ke = joint_target_ke[axis_start + i]
target_kd = joint_target_kd[axis_start + i]
mode = joint_axis_mode[axis_start + i]
f = eval_joint_force(q, qd, act, target_ke, target_kd, lower, upper, limit_ke, limit_kd, mode)
# total torque / force on the joint
t = -wp.dot(S_s, body_f_s) + f
tau[dof_start + i] = t
return
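# All branches of jcalc_tau() above follow the same pattern: tau_i = -dot(S_i, body_f_s),
# plus eval_joint_force() for joint types with driven or limited axes
# (prismatic, revolute, universal, compound, D6).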
@wp.func
def jcalc_integrate(
type: int,
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
joint_qdd: wp.array(dtype=float),
coord_start: int,
dof_start: int,
lin_axis_count: int,
ang_axis_count: int,
dt: float,
# outputs
joint_q_new: wp.array(dtype=float),
joint_qd_new: wp.array(dtype=float),
):
if type == wp.sim.JOINT_FIXED:
return
# prismatic / revolute
if type == wp.sim.JOINT_PRISMATIC or type == wp.sim.JOINT_REVOLUTE:
qdd = joint_qdd[dof_start]
qd = joint_qd[dof_start]
q = joint_q[coord_start]
qd_new = qd + qdd * dt
q_new = q + qd_new * dt
joint_qd_new[dof_start] = qd_new
joint_q_new[coord_start] = q_new
return
# ball
if type == wp.sim.JOINT_BALL:
m_j = wp.vec3(joint_qdd[dof_start + 0], joint_qdd[dof_start + 1], joint_qdd[dof_start + 2])
w_j = wp.vec3(joint_qd[dof_start + 0], joint_qd[dof_start + 1], joint_qd[dof_start + 2])
r_j = wp.quat(
joint_q[coord_start + 0], joint_q[coord_start + 1], joint_q[coord_start + 2], joint_q[coord_start + 3]
)
# symplectic Euler
w_j_new = w_j + m_j * dt
drdt_j = wp.quat(w_j_new, 0.0) * r_j * 0.5
# new orientation (normalized)
r_j_new = wp.normalize(r_j + drdt_j * dt)
# update joint coords
joint_q_new[coord_start + 0] = r_j_new[0]
joint_q_new[coord_start + 1] = r_j_new[1]
joint_q_new[coord_start + 2] = r_j_new[2]
joint_q_new[coord_start + 3] = r_j_new[3]
# update joint vel
joint_qd_new[dof_start + 0] = w_j_new[0]
joint_qd_new[dof_start + 1] = w_j_new[1]
joint_qd_new[dof_start + 2] = w_j_new[2]
return
# free joint
if type == wp.sim.JOINT_FREE or type == wp.sim.JOINT_DISTANCE:
# dofs: qd = (omega_x, omega_y, omega_z, vel_x, vel_y, vel_z)
# coords: q = (trans_x, trans_y, trans_z, quat_x, quat_y, quat_z, quat_w)
# angular and linear acceleration
m_s = wp.vec3(joint_qdd[dof_start + 0], joint_qdd[dof_start + 1], joint_qdd[dof_start + 2])
a_s = wp.vec3(joint_qdd[dof_start + 3], joint_qdd[dof_start + 4], joint_qdd[dof_start + 5])
# angular and linear velocity
w_s = wp.vec3(joint_qd[dof_start + 0], joint_qd[dof_start + 1], joint_qd[dof_start + 2])
v_s = wp.vec3(joint_qd[dof_start + 3], joint_qd[dof_start + 4], joint_qd[dof_start + 5])
# symplectic Euler
w_s = w_s + m_s * dt
v_s = v_s + a_s * dt
# translation of origin
p_s = wp.vec3(joint_q[coord_start + 0], joint_q[coord_start + 1], joint_q[coord_start + 2])
# linear vel of origin (note q/qd switch order of linear angular elements)
# note we are converting the body twist in the space frame (w_s, v_s) to compute the center of mass velocity
dpdt_s = v_s + wp.cross(w_s, p_s)
# quat and quat derivative
r_s = wp.quat(
joint_q[coord_start + 3], joint_q[coord_start + 4], joint_q[coord_start + 5], joint_q[coord_start + 6]
)
drdt_s = wp.quat(w_s, 0.0) * r_s * 0.5
# new orientation (normalized)
p_s_new = p_s + dpdt_s * dt
r_s_new = wp.normalize(r_s + drdt_s * dt)
# update transform
joint_q_new[coord_start + 0] = p_s_new[0]
joint_q_new[coord_start + 1] = p_s_new[1]
joint_q_new[coord_start + 2] = p_s_new[2]
joint_q_new[coord_start + 3] = r_s_new[0]
joint_q_new[coord_start + 4] = r_s_new[1]
joint_q_new[coord_start + 5] = r_s_new[2]
joint_q_new[coord_start + 6] = r_s_new[3]
# update joint_twist
joint_qd_new[dof_start + 0] = w_s[0]
joint_qd_new[dof_start + 1] = w_s[1]
joint_qd_new[dof_start + 2] = w_s[2]
joint_qd_new[dof_start + 3] = v_s[0]
joint_qd_new[dof_start + 4] = v_s[1]
joint_qd_new[dof_start + 5] = v_s[2]
return
# other joint types (compound, universal, D6)
if type == wp.sim.JOINT_COMPOUND or type == wp.sim.JOINT_UNIVERSAL or type == wp.sim.JOINT_D6:
axis_count = lin_axis_count + ang_axis_count
for i in range(axis_count):
qdd = joint_qdd[dof_start + i]
qd = joint_qd[dof_start + i]
q = joint_q[coord_start + i]
qd_new = qd + qdd * dt
q_new = q + qd_new * dt
joint_qd_new[dof_start + i] = qd_new
joint_q_new[coord_start + i] = q_new
return
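# Update equations implemented by jcalc_integrate() above (symplectic Euler):
#   qd_new = qd + qdd * dt
#   q_new = q + qd_new * dt
# and for quaternion coordinates r with angular velocity w:
#   r_new = normalize(r + 0.5 * quat(w_new, 0.0) * r * dt)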
@wp.func
def compute_link_transform(
i: int,
joint_type: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_q_start: wp.array(dtype=int),
joint_q: wp.array(dtype=float),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
body_X_com: wp.array(dtype=wp.transform),
joint_axis: wp.array(dtype=wp.vec3),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
# outputs
body_q: wp.array(dtype=wp.transform),
body_q_com: wp.array(dtype=wp.transform),
):
# parent transform
parent = joint_parent[i]
child = joint_child[i]
# parent transform in spatial coordinates
X_pj = joint_X_p[i]
X_cj = joint_X_c[i]
# parent anchor frame in world space
X_wpj = X_pj
if parent >= 0:
X_wp = body_q[parent]
X_wpj = X_wp * X_wpj
type = joint_type[i]
axis_start = joint_axis_start[i]
lin_axis_count = joint_axis_dim[i, 0]
ang_axis_count = joint_axis_dim[i, 1]
coord_start = joint_q_start[i]
# compute transform across joint
X_j = jcalc_transform(type, joint_axis, axis_start, lin_axis_count, ang_axis_count, joint_q, coord_start)
# transform from world to joint anchor frame at child body
X_wcj = X_wpj * X_j
# transform from world to child body frame
X_wc = X_wcj * wp.transform_inverse(X_cj)
# compute transform of center of mass
X_cm = body_X_com[child]
X_sm = X_wc * X_cm
# store geometry transforms
body_q[child] = X_wc
body_q_com[child] = X_sm
@wp.kernel
def eval_rigid_fk(
articulation_start: wp.array(dtype=int),
joint_type: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_q_start: wp.array(dtype=int),
joint_q: wp.array(dtype=float),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
body_X_com: wp.array(dtype=wp.transform),
joint_axis: wp.array(dtype=wp.vec3),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
# outputs
body_q: wp.array(dtype=wp.transform),
body_q_com: wp.array(dtype=wp.transform),
):
# one thread per-articulation
index = wp.tid()
start = articulation_start[index]
end = articulation_start[index + 1]
for i in range(start, end):
compute_link_transform(
i,
joint_type,
joint_parent,
joint_child,
joint_q_start,
joint_q,
joint_X_p,
joint_X_c,
body_X_com,
joint_axis,
joint_axis_start,
joint_axis_dim,
body_q,
body_q_com,
)
@wp.func
def spatial_cross(a: wp.spatial_vector, b: wp.spatial_vector):
w_a = wp.spatial_top(a)
v_a = wp.spatial_bottom(a)
w_b = wp.spatial_top(b)
v_b = wp.spatial_bottom(b)
w = wp.cross(w_a, w_b)
v = wp.cross(w_a, v_b) + wp.cross(v_a, w_b)
return wp.spatial_vector(w, v)
@wp.func
def spatial_cross_dual(a: wp.spatial_vector, b: wp.spatial_vector):
w_a = wp.spatial_top(a)
v_a = wp.spatial_bottom(a)
w_b = wp.spatial_top(b)
v_b = wp.spatial_bottom(b)
w = wp.cross(w_a, w_b) + wp.cross(v_a, v_b)
v = wp.cross(w_a, v_b)
return wp.spatial_vector(w, v)
@wp.func
def dense_index(stride: int, i: int, j: int):
return i * stride + j
@wp.func
def compute_link_velocity(
i: int,
joint_type: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_q_start: wp.array(dtype=int),
joint_qd_start: wp.array(dtype=int),
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
joint_axis: wp.array(dtype=wp.vec3),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
body_I_m: wp.array(dtype=wp.spatial_matrix),
body_q: wp.array(dtype=wp.transform),
body_q_com: wp.array(dtype=wp.transform),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
gravity: wp.vec3,
# outputs
joint_S_s: wp.array(dtype=wp.spatial_vector),
body_I_s: wp.array(dtype=wp.spatial_matrix),
body_v_s: wp.array(dtype=wp.spatial_vector),
body_f_s: wp.array(dtype=wp.spatial_vector),
body_a_s: wp.array(dtype=wp.spatial_vector),
):
type = joint_type[i]
child = joint_child[i]
parent = joint_parent[i]
q_start = joint_q_start[i]
qd_start = joint_qd_start[i]
X_pj = joint_X_p[i]
# X_cj = joint_X_c[i]
# parent anchor frame in world space
X_wpj = X_pj
if parent >= 0:
X_wp = body_q[parent]
X_wpj = X_wp * X_wpj
# compute motion subspace and velocity across the joint (also stores S_s to global memory)
axis_start = joint_axis_start[i]
lin_axis_count = joint_axis_dim[i, 0]
ang_axis_count = joint_axis_dim[i, 1]
v_j_s = jcalc_motion(
type,
joint_axis,
axis_start,
lin_axis_count,
ang_axis_count,
X_wpj,
joint_q,
joint_qd,
q_start,
qd_start,
joint_S_s,
)
# parent velocity
v_parent_s = wp.spatial_vector()
a_parent_s = wp.spatial_vector()
if parent >= 0:
v_parent_s = body_v_s[parent]
a_parent_s = body_a_s[parent]
# body velocity, acceleration
v_s = v_parent_s + v_j_s
a_s = a_parent_s + spatial_cross(v_s, v_j_s) # + joint_S_s[i]*self.joint_qdd[i]
# compute body forces
X_sm = body_q_com[child]
I_m = body_I_m[child]
# gravity and external forces (expressed in frame aligned with s but centered at body mass)
m = I_m[3, 3]
f_g = m * gravity
r_com = wp.transform_get_translation(X_sm)
f_g_s = wp.spatial_vector(wp.cross(r_com, f_g), f_g)
# body forces
I_s = spatial_transform_inertia(X_sm, I_m)
f_b_s = I_s * a_s + spatial_cross_dual(v_s, I_s * v_s)
body_v_s[child] = v_s
body_a_s[child] = a_s
body_f_s[child] = f_b_s - f_g_s
body_I_s[child] = I_s
# Inverse dynamics via Recursive Newton-Euler algorithm (Featherstone Table 5.1)
@wp.kernel
def eval_rigid_id(
articulation_start: wp.array(dtype=int),
joint_type: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_q_start: wp.array(dtype=int),
joint_qd_start: wp.array(dtype=int),
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
joint_axis: wp.array(dtype=wp.vec3),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
body_I_m: wp.array(dtype=wp.spatial_matrix),
body_q: wp.array(dtype=wp.transform),
body_q_com: wp.array(dtype=wp.transform),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
gravity: wp.vec3,
# outputs
joint_S_s: wp.array(dtype=wp.spatial_vector),
body_I_s: wp.array(dtype=wp.spatial_matrix),
body_v_s: wp.array(dtype=wp.spatial_vector),
body_f_s: wp.array(dtype=wp.spatial_vector),
body_a_s: wp.array(dtype=wp.spatial_vector),
):
# one thread per-articulation
index = wp.tid()
start = articulation_start[index]
end = articulation_start[index + 1]
# compute link velocities and coriolis forces
for i in range(start, end):
compute_link_velocity(
i,
joint_type,
joint_parent,
joint_child,
joint_q_start,
joint_qd_start,
joint_q,
joint_qd,
joint_axis,
joint_axis_start,
joint_axis_dim,
body_I_m,
body_q,
body_q_com,
joint_X_p,
joint_X_c,
gravity,
joint_S_s,
body_I_s,
body_v_s,
body_f_s,
body_a_s,
)
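# Note on the structure: eval_rigid_id above is the forward (root-to-leaf) pass of the
# recursive Newton-Euler algorithm, propagating spatial velocities/accelerations and
# accumulating the bias force f = I*a + v x* (I*v) - f_gravity per body; eval_rigid_tau
# below is the backward (leaf-to-root) pass that projects the accumulated body forces
# onto the joint motion subspaces to obtain tau.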
@wp.kernel
def eval_rigid_tau(
articulation_start: wp.array(dtype=int),
joint_type: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_q_start: wp.array(dtype=int),
joint_qd_start: wp.array(dtype=int),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
joint_axis_mode: wp.array(dtype=int),
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
joint_act: wp.array(dtype=float),
joint_target_ke: wp.array(dtype=float),
joint_target_kd: wp.array(dtype=float),
joint_limit_lower: wp.array(dtype=float),
joint_limit_upper: wp.array(dtype=float),
joint_limit_ke: wp.array(dtype=float),
joint_limit_kd: wp.array(dtype=float),
joint_S_s: wp.array(dtype=wp.spatial_vector),
body_fb_s: wp.array(dtype=wp.spatial_vector),
body_f_ext: wp.array(dtype=wp.spatial_vector),
# outputs
body_ft_s: wp.array(dtype=wp.spatial_vector),
tau: wp.array(dtype=float),
):
# one thread per-articulation
index = wp.tid()
start = articulation_start[index]
end = articulation_start[index + 1]
count = end - start
# compute joint forces
for offset in range(count):
# for backwards traversal
i = end - offset - 1
type = joint_type[i]
parent = joint_parent[i]
child = joint_child[i]
dof_start = joint_qd_start[i]
coord_start = joint_q_start[i]
axis_start = joint_axis_start[i]
lin_axis_count = joint_axis_dim[i, 0]
ang_axis_count = joint_axis_dim[i, 1]
# total forces on body
f_b_s = body_fb_s[child]
f_t_s = body_ft_s[child]
f_ext = body_f_ext[child]
f_s = f_b_s + f_t_s + f_ext
# compute joint-space forces, writes out tau
jcalc_tau(
type,
joint_target_ke,
joint_target_kd,
joint_limit_ke,
joint_limit_kd,
joint_S_s,
joint_q,
joint_qd,
joint_act,
joint_axis_mode,
joint_limit_lower,
joint_limit_upper,
coord_start,
dof_start,
axis_start,
lin_axis_count,
ang_axis_count,
f_s,
tau,
)
# update parent forces, todo: check that this is valid for the backwards pass
if parent >= 0:
wp.atomic_add(body_ft_s, parent, f_s)
# builds the spatial Jacobian J, a (joint_count*6) x (dof_count) matrix
@wp.kernel
def eval_rigid_jacobian(
articulation_start: wp.array(dtype=int),
articulation_J_start: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_qd_start: wp.array(dtype=int),
joint_S_s: wp.array(dtype=wp.spatial_vector),
# outputs
J: wp.array(dtype=float),
):
# one thread per-articulation
index = wp.tid()
joint_start = articulation_start[index]
joint_end = articulation_start[index + 1]
joint_count = joint_end - joint_start
J_offset = articulation_J_start[index]
articulation_dof_start = joint_qd_start[joint_start]
articulation_dof_end = joint_qd_start[joint_end]
articulation_dof_count = articulation_dof_end - articulation_dof_start
for i in range(joint_count):
row_start = i * 6
j = joint_start + i
while j != -1:
joint_dof_start = joint_qd_start[j]
joint_dof_end = joint_qd_start[j + 1]
joint_dof_count = joint_dof_end - joint_dof_start
# fill out each row of the Jacobian walking up the tree
for dof in range(joint_dof_count):
col = (joint_dof_start - articulation_dof_start) + dof
S = joint_S_s[joint_dof_start + dof]
for k in range(6):
J[J_offset + dense_index(articulation_dof_count, row_start + k, col)] = S[k]
j = joint_parent[j]
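# Layout of J built above: per articulation, 6 rows per link and one column per dof, stored
# row-major with stride articulation_dof_count. The 6x1 block for link i and dof j holds the
# motion-subspace vector S_j whenever joint j lies on the path from link i to the root
# (the while-loop over joint_parent); all other entries remain zero.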
@wp.func
def spatial_mass(
body_I_s: wp.array(dtype=wp.spatial_matrix),
joint_start: int,
joint_count: int,
M_start: int,
# outputs
M: wp.array(dtype=float),
):
stride = joint_count * 6
for l in range(joint_count):
I = body_I_s[joint_start + l]
for i in range(6):
for j in range(6):
M[M_start + dense_index(stride, l * 6 + i, l * 6 + j)] = I[i, j]
@wp.kernel
def eval_rigid_mass(
articulation_start: wp.array(dtype=int),
articulation_M_start: wp.array(dtype=int),
body_I_s: wp.array(dtype=wp.spatial_matrix),
# outputs
M: wp.array(dtype=float),
):
# one thread per-articulation
index = wp.tid()
joint_start = articulation_start[index]
joint_end = articulation_start[index + 1]
joint_count = joint_end - joint_start
M_offset = articulation_M_start[index]
spatial_mass(body_I_s, joint_start, joint_count, M_offset, M)
@wp.func
def dense_gemm(
m: int,
n: int,
p: int,
transpose_A: bool,
transpose_B: bool,
add_to_C: bool,
A_start: int,
B_start: int,
C_start: int,
A: wp.array(dtype=float),
B: wp.array(dtype=float),
# outputs
C: wp.array(dtype=float),
):
# multiply a `m x p` matrix A by a `p x n` matrix B to produce a `m x n` matrix C
for i in range(m):
for j in range(n):
sum = float(0.0)
for k in range(p):
if transpose_A:
a_i = k * m + i
else:
a_i = i * p + k
if transpose_B:
b_j = j * p + k
else:
b_j = k * n + j
sum += A[A_start + a_i] * B[B_start + b_j]
if add_to_C:
C[C_start + i * n + j] += sum
else:
C[C_start + i * n + j] = sum
# @wp.func_grad(dense_gemm)
# def adj_dense_gemm(
# m: int,
# n: int,
# p: int,
# transpose_A: bool,
# transpose_B: bool,
# add_to_C: bool,
# A_start: int,
# B_start: int,
# C_start: int,
# A: wp.array(dtype=float),
# B: wp.array(dtype=float),
# # outputs
# C: wp.array(dtype=float),
# ):
# add_to_C = True
# if transpose_A:
# dense_gemm(p, m, n, False, True, add_to_C, A_start, B_start, C_start, B, wp.adjoint[C], wp.adjoint[A])
# dense_gemm(p, n, m, False, False, add_to_C, A_start, B_start, C_start, A, wp.adjoint[C], wp.adjoint[B])
# else:
# dense_gemm(
# m, p, n, False, not transpose_B, add_to_C, A_start, B_start, C_start, wp.adjoint[C], B, wp.adjoint[A]
# )
# dense_gemm(p, n, m, True, False, add_to_C, A_start, B_start, C_start, A, wp.adjoint[C], wp.adjoint[B])
@wp.kernel
def eval_dense_gemm_batched(
m: wp.array(dtype=int),
n: wp.array(dtype=int),
p: wp.array(dtype=int),
transpose_A: bool,
transpose_B: bool,
A_start: wp.array(dtype=int),
B_start: wp.array(dtype=int),
C_start: wp.array(dtype=int),
A: wp.array(dtype=float),
B: wp.array(dtype=float),
C: wp.array(dtype=float),
):
# on the CPU each thread computes the whole matrix multiply
# on the GPU each block computes the multiply with one output per-thread
batch = wp.tid() # /kNumThreadsPerBlock;
add_to_C = False
dense_gemm(
m[batch],
n[batch],
p[batch],
transpose_A,
transpose_B,
add_to_C,
A_start[batch],
B_start[batch],
C_start[batch],
A,
B,
C,
)
@wp.func
def dense_cholesky(
n: int,
A: wp.array(dtype=float),
R: wp.array(dtype=float),
A_start: int,
R_start: int,
# outputs
L: wp.array(dtype=float),
):
# compute the Cholesky factorization L L^T = A + diag(R), where R provides diagonal regularization
for j in range(n):
s = A[A_start + dense_index(n, j, j)] + R[R_start + j]
for k in range(j):
r = L[A_start + dense_index(n, j, k)]
s -= r * r
s = wp.sqrt(s)
invS = 1.0 / s
L[A_start + dense_index(n, j, j)] = s
for i in range(j + 1, n):
s = A[A_start + dense_index(n, i, j)]
for k in range(j):
s -= L[A_start + dense_index(n, i, k)] * L[A_start + dense_index(n, j, k)]
L[A_start + dense_index(n, i, j)] = s * invS
@wp.func_grad(dense_cholesky)
def adj_dense_cholesky(
n: int,
A: wp.array(dtype=float),
R: wp.array(dtype=float),
A_start: int,
R_start: int,
# outputs
L: wp.array(dtype=float),
):
# nop, use dense_solve to differentiate through (A^-1)b = x
pass
@wp.kernel
def eval_dense_cholesky_batched(
A_starts: wp.array(dtype=int),
A_dim: wp.array(dtype=int),
A: wp.array(dtype=float),
R: wp.array(dtype=float),
L: wp.array(dtype=float),
):
batch = wp.tid()
n = A_dim[batch]
A_start = A_starts[batch]
R_start = n * batch
dense_cholesky(n, A, R, A_start, R_start, L)
@wp.func
def dense_subs(
n: int,
L_start: int,
b_start: int,
L: wp.array(dtype=float),
b: wp.array(dtype=float),
# outputs
x: wp.array(dtype=float),
):
# Solves (L L^T) x = b for x given the Cholesky factor L
# forward substitution solves the lower triangular system L y = b for y
for i in range(n):
s = b[b_start + i]
for j in range(i):
s -= L[L_start + dense_index(n, i, j)] * x[b_start + j]
x[b_start + i] = s / L[L_start + dense_index(n, i, i)]
# backward substitution solves the upper triangular system L^T x = y for x
for i in range(n - 1, -1, -1):
s = x[b_start + i]
for j in range(i + 1, n):
s -= L[L_start + dense_index(n, j, i)] * x[b_start + j]
x[b_start + i] = s / L[L_start + dense_index(n, i, i)]
@wp.func
def dense_solve(
n: int,
L_start: int,
b_start: int,
L: wp.array(dtype=float),
b: wp.array(dtype=float),
# outputs
x: wp.array(dtype=float),
tmp: wp.array(dtype=float),
):
# helper function to include tmp argument for backward pass
dense_subs(n, L_start, b_start, L, b, x)
@wp.func_grad(dense_solve)
def adj_dense_solve(
n: int,
L_start: int,
b_start: int,
L: wp.array(dtype=float),
b: wp.array(dtype=float),
# outputs
x: wp.array(dtype=float),
tmp: wp.array(dtype=float),
):
if not tmp or not wp.adjoint[x] or not wp.adjoint[L]:
return
for i in range(n):
tmp[b_start + i] = 0.0
dense_subs(n, L_start, b_start, L, wp.adjoint[x], tmp)
for i in range(n):
wp.adjoint[b][b_start + i] += tmp[b_start + i]
# A* = -adj_b*x^T
for i in range(n):
for j in range(n):
wp.adjoint[L][L_start + dense_index(n, i, j)] += -tmp[b_start + i] * x[b_start + j]
@wp.kernel
def eval_dense_solve_batched(
L_start: wp.array(dtype=int),
L_dim: wp.array(dtype=int),
b_start: wp.array(dtype=int),
L: wp.array(dtype=float),
b: wp.array(dtype=float),
# outputs
x: wp.array(dtype=float),
tmp: wp.array(dtype=float),
):
batch = wp.tid()
dense_solve(L_dim[batch], L_start[batch], b_start[batch], L, b, x, tmp)
@wp.kernel
def integrate_generalized_joints(
joint_type: wp.array(dtype=int),
joint_q_start: wp.array(dtype=int),
joint_qd_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
joint_qdd: wp.array(dtype=float),
dt: float,
# outputs
joint_q_new: wp.array(dtype=float),
joint_qd_new: wp.array(dtype=float),
):
# one thread per-articulation
index = wp.tid()
type = joint_type[index]
coord_start = joint_q_start[index]
dof_start = joint_qd_start[index]
lin_axis_count = joint_axis_dim[index, 0]
ang_axis_count = joint_axis_dim[index, 1]
jcalc_integrate(
type,
joint_q,
joint_qd,
joint_qdd,
coord_start,
dof_start,
lin_axis_count,
ang_axis_count,
dt,
joint_q_new,
joint_qd_new,
)
class FeatherstoneIntegrator(Integrator):
"""A semi-implicit integrator using symplectic Euler that operates
on reduced (also called generalized) coordinates to simulate articulated rigid body dynamics
based on Featherstone's composite rigid body algorithm (CRBA).
See: Featherstone, Roy. Rigid Body Dynamics Algorithms. Springer US, 2014.
Instead of maximal coordinates :attr:`State.body_q` (rigid body positions) and :attr:`State.body_qd`
(rigid body velocities) as is the case for :class:`SemiImplicitIntegrator`, :class:`FeatherstoneIntegrator`
uses :attr:`State.joint_q` and :attr:`State.joint_qd` to represent the positions and velocities of
joints without allowing any redundant degrees of freedom.
After constructing :class:`Model` and :class:`State` objects this time-integrator
may be used to advance the simulation state forward in time.
Note:
Unlike :class:`SemiImplicitIntegrator` and :class:`XPBDIntegrator`, :class:`FeatherstoneIntegrator` does not simulate rigid bodies with nonzero mass as floating bodies if they are not connected through any joints. Floating-base systems require an explicit free joint with which the body is connected to the world, see :meth:`ModelBuilder.add_joint_free`.
Semi-implicit time integration is a variational integrator that
preserves energy, however it is not unconditionally stable, and requires a time-step
small enough to support the required stiffness and damping forces.
See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method
Example
-------
.. code-block:: python
integrator = wp.sim.FeatherstoneIntegrator(model)
# simulation loop
for i in range(100):
state = integrator.simulate(model, state_in, state_out, dt)
Note:
The :class:`FeatherstoneIntegrator` requires the :class:`Model` to be passed in as a constructor argument.
"""
def __init__(self, model, angular_damping=0.05, update_mass_matrix_every=1):
"""
Args:
model (Model): the model to be simulated.
angular_damping (float, optional): Angular damping factor. Defaults to 0.05.
update_mass_matrix_every (int, optional): How often to update the mass matrix (every n-th time the :meth:`simulate` function gets called). Defaults to 1.
"""
self.angular_damping = angular_damping
self.update_mass_matrix_every = update_mass_matrix_every
self.compute_articulation_indices(model)
self.allocate_model_aux_vars(model)
self._step = 0
def compute_articulation_indices(self, model):
# calculate total size and offsets of Jacobian and mass matrices for entire system
if model.joint_count:
self.J_size = 0
self.M_size = 0
self.H_size = 0
articulation_J_start = []
articulation_M_start = []
articulation_H_start = []
articulation_M_rows = []
articulation_H_rows = []
articulation_J_rows = []
articulation_J_cols = []
articulation_dof_start = []
articulation_coord_start = []
articulation_start = model.articulation_start.numpy()
joint_q_start = model.joint_q_start.numpy()
joint_qd_start = model.joint_qd_start.numpy()
for i in range(model.articulation_count):
first_joint = articulation_start[i]
last_joint = articulation_start[i + 1]
first_coord = joint_q_start[first_joint]
first_dof = joint_qd_start[first_joint]
last_dof = joint_qd_start[last_joint]
joint_count = last_joint - first_joint
dof_count = last_dof - first_dof
articulation_J_start.append(self.J_size)
articulation_M_start.append(self.M_size)
articulation_H_start.append(self.H_size)
articulation_dof_start.append(first_dof)
articulation_coord_start.append(first_coord)
# bit of data duplication here, but will leave it as such for clarity
articulation_M_rows.append(joint_count * 6)
articulation_H_rows.append(dof_count)
articulation_J_rows.append(joint_count * 6)
articulation_J_cols.append(dof_count)
self.J_size += 6 * joint_count * dof_count
self.M_size += 6 * joint_count * 6 * joint_count
self.H_size += dof_count * dof_count
# matrix offsets for batched gemm
self.articulation_J_start = wp.array(articulation_J_start, dtype=wp.int32, device=model.device)
self.articulation_M_start = wp.array(articulation_M_start, dtype=wp.int32, device=model.device)
self.articulation_H_start = wp.array(articulation_H_start, dtype=wp.int32, device=model.device)
self.articulation_M_rows = wp.array(articulation_M_rows, dtype=wp.int32, device=model.device)
self.articulation_H_rows = wp.array(articulation_H_rows, dtype=wp.int32, device=model.device)
self.articulation_J_rows = wp.array(articulation_J_rows, dtype=wp.int32, device=model.device)
self.articulation_J_cols = wp.array(articulation_J_cols, dtype=wp.int32, device=model.device)
self.articulation_dof_start = wp.array(articulation_dof_start, dtype=wp.int32, device=model.device)
self.articulation_coord_start = wp.array(articulation_coord_start, dtype=wp.int32, device=model.device)
def allocate_model_aux_vars(self, model):
# allocate mass, Jacobian matrices, and other auxiliary variables pertaining to the model
if model.joint_count:
# system matrices
self.M = wp.zeros((self.M_size,), dtype=wp.float32, device=model.device, requires_grad=model.requires_grad)
self.J = wp.zeros((self.J_size,), dtype=wp.float32, device=model.device, requires_grad=model.requires_grad)
self.P = wp.empty_like(self.J, requires_grad=model.requires_grad)
self.H = wp.empty((self.H_size,), dtype=wp.float32, device=model.device, requires_grad=model.requires_grad)
# zero-initialize since the Cholesky factorization only writes the lower triangle; uninitialized entries can trigger NaN detection
self.L = wp.zeros_like(self.H)
if model.body_count:
# TODO use requires_grad here?
self.body_I_m = wp.empty(
(model.body_count,), dtype=wp.spatial_matrix, device=model.device, requires_grad=model.requires_grad
)
wp.launch(
compute_spatial_inertia,
model.body_count,
inputs=[model.body_inertia, model.body_mass],
outputs=[self.body_I_m],
device=model.device,
)
self.body_X_com = wp.empty(
(model.body_count,), dtype=wp.transform, device=model.device, requires_grad=model.requires_grad
)
wp.launch(
compute_com_transforms,
model.body_count,
inputs=[model.body_com],
outputs=[self.body_X_com],
device=model.device,
)
def allocate_state_aux_vars(self, model, target, requires_grad):
# allocate auxiliary variables that vary with state
if model.body_count:
# joints
target.joint_qdd = wp.zeros_like(model.joint_qd, requires_grad=requires_grad)
target.joint_tau = wp.empty_like(model.joint_qd, requires_grad=requires_grad)
if requires_grad:
# used in the custom grad implementation of eval_dense_solve_batched
target.joint_solve_tmp = wp.zeros_like(model.joint_qd, requires_grad=True)
else:
target.joint_solve_tmp = None
target.joint_S_s = wp.empty(
(model.joint_dof_count,),
dtype=wp.spatial_vector,
device=model.device,
requires_grad=requires_grad,
)
# derived rigid body data (maximal coordinates)
target.body_q_com = wp.empty_like(model.body_q, requires_grad=requires_grad)
target.body_I_s = wp.empty(
(model.body_count,), dtype=wp.spatial_matrix, device=model.device, requires_grad=requires_grad
)
target.body_v_s = wp.empty(
(model.body_count,), dtype=wp.spatial_vector, device=model.device, requires_grad=requires_grad
)
target.body_a_s = wp.empty(
(model.body_count,), dtype=wp.spatial_vector, device=model.device, requires_grad=requires_grad
)
target.body_f_s = wp.zeros(
(model.body_count,), dtype=wp.spatial_vector, device=model.device, requires_grad=requires_grad
)
target.body_ft_s = wp.zeros(
(model.body_count,), dtype=wp.spatial_vector, device=model.device, requires_grad=requires_grad
)
target._featherstone_augmented = True
def simulate(self, model: Model, state_in: State, state_out: State, dt: float, control: Control = None):
requires_grad = state_in.requires_grad
# optionally create dynamical auxiliary variables
if requires_grad:
state_aug = state_out
else:
state_aug = self
if not getattr(state_aug, "_featherstone_augmented", False):
self.allocate_state_aux_vars(model, state_aug, requires_grad)
if control is None:
control = model.control(clone_variables=False)
with wp.ScopedTimer("simulate", False):
particle_f = None
body_f = None
if state_in.particle_count:
particle_f = state_in.particle_f
if state_in.body_count:
body_f = state_in.body_f
# damped springs
eval_spring_forces(model, state_in, particle_f)
# triangle elastic and lift/drag forces
eval_triangle_forces(model, state_in, control, particle_f)
# triangle/triangle contacts
eval_triangle_contact_forces(model, state_in, particle_f)
# triangle bending
eval_bending_forces(model, state_in, particle_f)
# tetrahedral FEM
eval_tetrahedral_forces(model, state_in, control, particle_f)
# particle-particle interactions
eval_particle_forces(model, state_in, particle_f)
# particle ground contacts
eval_particle_ground_contact_forces(model, state_in, particle_f)
# particle shape contact
eval_particle_body_contact_forces(model, state_in, particle_f, body_f)
# muscles
if False:
eval_muscle_forces(model, state_in, control, body_f)
# ----------------------------
# articulations
if model.joint_count:
# evaluate body transforms
wp.launch(
eval_rigid_fk,
dim=model.articulation_count,
inputs=[
model.articulation_start,
model.joint_type,
model.joint_parent,
model.joint_child,
model.joint_q_start,
state_in.joint_q,
model.joint_X_p,
model.joint_X_c,
self.body_X_com,
model.joint_axis,
model.joint_axis_start,
model.joint_axis_dim,
],
outputs=[state_in.body_q, state_aug.body_q_com],
device=model.device,
)
# print("body_X_sc:")
# print(state_in.body_q.numpy())
# evaluate joint inertias, motion vectors, and forces
state_aug.body_f_s.zero_()
wp.launch(
eval_rigid_id,
dim=model.articulation_count,
inputs=[
model.articulation_start,
model.joint_type,
model.joint_parent,
model.joint_child,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
model.joint_axis,
model.joint_axis_start,
model.joint_axis_dim,
self.body_I_m,
state_in.body_q,
state_aug.body_q_com,
model.joint_X_p,
model.joint_X_c,
model.gravity,
],
outputs=[
state_aug.joint_S_s,
state_aug.body_I_s,
state_aug.body_v_s,
state_aug.body_f_s,
state_aug.body_a_s,
],
device=model.device,
)
if model.rigid_contact_max and (
model.ground and model.shape_ground_contact_pair_count or model.shape_contact_pair_count
):
wp.launch(
kernel=eval_rigid_contacts,
dim=model.rigid_contact_max,
inputs=[
state_in.body_q,
state_aug.body_v_s,
model.body_com,
model.shape_materials,
model.shape_geo,
model.shape_body,
model.rigid_contact_count,
model.rigid_contact_point0,
model.rigid_contact_point1,
model.rigid_contact_normal,
model.rigid_contact_shape0,
model.rigid_contact_shape1,
True,
],
outputs=[body_f],
device=model.device,
)
# if model.rigid_contact_count.numpy()[0] > 0:
# print(body_f.numpy())
if model.articulation_count:
# evaluate joint torques
state_aug.body_ft_s.zero_()
wp.launch(
eval_rigid_tau,
dim=model.articulation_count,
inputs=[
model.articulation_start,
model.joint_type,
model.joint_parent,
model.joint_child,
model.joint_q_start,
model.joint_qd_start,
model.joint_axis_start,
model.joint_axis_dim,
model.joint_axis_mode,
state_in.joint_q,
state_in.joint_qd,
control.joint_act,
model.joint_target_ke,
model.joint_target_kd,
model.joint_limit_lower,
model.joint_limit_upper,
model.joint_limit_ke,
model.joint_limit_kd,
state_aug.joint_S_s,
state_aug.body_f_s,
body_f,
],
outputs=[
state_aug.body_ft_s,
state_aug.joint_tau,
],
device=model.device,
)
# print("joint_tau:")
# print(state_aug.joint_tau.numpy())
# print("body_q:")
# print(state_in.body_q.numpy())
# print("body_qd:")
# print(state_in.body_qd.numpy())
if self._step % self.update_mass_matrix_every == 0:
# build J
wp.launch(
eval_rigid_jacobian,
dim=model.articulation_count,
inputs=[
model.articulation_start,
self.articulation_J_start,
model.joint_parent,
model.joint_qd_start,
state_aug.joint_S_s,
],
outputs=[self.J],
device=model.device,
)
# build M
wp.launch(
eval_rigid_mass,
dim=model.articulation_count,
inputs=[
model.articulation_start,
self.articulation_M_start,
state_aug.body_I_s,
],
outputs=[self.M],
device=model.device,
)
# form P = M*J
wp.launch(
eval_dense_gemm_batched,
dim=model.articulation_count,
inputs=[
self.articulation_M_rows,
self.articulation_J_cols,
self.articulation_J_rows,
False,
False,
self.articulation_M_start,
self.articulation_J_start,
# P start is the same as J start since it has the same dims as J
self.articulation_J_start,
self.M,
self.J,
],
outputs=[self.P],
device=model.device,
)
# form H = J^T*P
wp.launch(
eval_dense_gemm_batched,
dim=model.articulation_count,
inputs=[
self.articulation_J_cols,
self.articulation_J_cols,
# P rows is the same as J rows
self.articulation_J_rows,
True,
False,
self.articulation_J_start,
# P start is the same as J start since it has the same dims as J
self.articulation_J_start,
self.articulation_H_start,
self.J,
self.P,
],
outputs=[self.H],
device=model.device,
)
# compute decomposition
wp.launch(
eval_dense_cholesky_batched,
dim=model.articulation_count,
inputs=[
self.articulation_H_start,
self.articulation_H_rows,
self.H,
model.joint_armature,
],
outputs=[self.L],
device=model.device,
)
# print("joint_act:")
# print(control.joint_act.numpy())
# print("joint_tau:")
# print(state_aug.joint_tau.numpy())
# print("H:")
# print(self.H.numpy())
# print("L:")
# print(self.L.numpy())
# solve for qdd
state_aug.joint_qdd.zero_()
wp.launch(
eval_dense_solve_batched,
dim=model.articulation_count,
inputs=[
self.articulation_H_start,
self.articulation_H_rows,
self.articulation_dof_start,
self.L,
state_aug.joint_tau,
],
outputs=[
state_aug.joint_qdd,
state_aug.joint_solve_tmp,
],
device=model.device,
)
# if wp.context.runtime.tape:
# wp.context.runtime.tape.record_func(
# backward=lambda: adj_matmul(
# a, b, c, a.grad, b.grad, c.grad, d.grad, alpha, beta, allow_tf32x3_arith, device
# ),
# arrays=[a, b, c, d],
# )
# print("joint_qdd:")
# print(state_aug.joint_qdd.numpy())
# print("\n\n")
# -------------------------------------
# integrate bodies
if model.joint_count:
wp.launch(
kernel=integrate_generalized_joints,
dim=model.joint_count,
inputs=[
model.joint_type,
model.joint_q_start,
model.joint_qd_start,
model.joint_axis_dim,
state_in.joint_q,
state_in.joint_qd,
state_aug.joint_qdd,
dt,
],
outputs=[state_out.joint_q, state_out.joint_qd],
device=model.device,
)
# update maximal coordinates
eval_fk(model, state_out.joint_q, state_out.joint_qd, None, state_out)
self.integrate_particles(model, state_in, state_out, dt)
self._step += 1
return state_out
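# Summary of the mass-matrix solve in simulate() above (descriptive only, no new behavior):
# every `update_mass_matrix_every`-th step the dense per-articulation blocks are rebuilt:
# J (spatial Jacobian), M (block-diagonal spatial inertias), P = M @ J and H = J^T @ P,
# and H regularized by the joint armature is Cholesky-factorized into L. Every step then
# solves (L L^T) joint_qdd = joint_tau, integrates the generalized joints with symplectic
# Euler, and evaluates forward kinematics to update the maximal-coordinate body state.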
| 64,584 | Python | 32.796442 | 362 | 0.522033 |
NVIDIA/warp/warp/sim/import_snu.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import xml.etree.ElementTree as ET
import numpy as np
import warp as wp
# SNU file format parser
class MuscleUnit:
def __init__(self):
self.name = ""
self.bones = []
self.points = []
class Skeleton:
def __init__(self, root_xform, skeleton_file, muscle_file, builder, filter, armature=0.0):
self.parse_skeleton(skeleton_file, builder, filter, root_xform, armature)
self.parse_muscles(muscle_file, builder)
def parse_skeleton(self, filename, builder, filter, root_xform, armature):
file = ET.parse(filename)
root = file.getroot()
self.node_map = {} # map node names to link indices
self.xform_map = {} # map node names to parent transforms
self.mesh_map = {} # map mesh names to link indices objects
self.coord_start = builder.joint_coord_count
self.dof_start = builder.joint_dof_count
type_map = {
"Ball": wp.sim.JOINT_BALL,
"Revolute": wp.sim.JOINT_REVOLUTE,
"Prismatic": wp.sim.JOINT_PRISMATIC,
"Free": wp.sim.JOINT_FREE,
"Fixed": wp.sim.JOINT_FIXED,
}
builder.add_articulation()
for child in root:
if child.tag == "Node":
body = child.find("Body")
joint = child.find("Joint")
name = child.attrib["name"]
parent = child.attrib["parent"]
parent_X_s = wp.transform_identity()
if parent in self.node_map:
parent_link = self.node_map[parent]
parent_X_s = self.xform_map[parent]
else:
parent_link = -1
body_xform = body.find("Transformation")
joint_xform = joint.find("Transformation")
body_mesh = body.attrib["obj"]
body_size = np.fromstring(body.attrib["size"], sep=" ")
# body_type = body.attrib["type"]
# body_mass = body.attrib["mass"]
body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3, 3))
body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ")
joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3, 3))
joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ")
joint_type = type_map[joint.attrib["type"]]
joint_lower = np.array([-1.0e3])
joint_upper = np.array([1.0e3])
try:
joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")
joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")
except Exception:
pass
if "axis" in joint.attrib:
joint_axis = np.fromstring(joint.attrib["axis"], sep=" ")
else:
joint_axis = np.array((0.0, 0.0, 0.0))
body_X_s = wp.transform(body_t_s, wp.quat_from_matrix(body_R_s))
joint_X_s = wp.transform(joint_t_s, wp.quat_from_matrix(joint_R_s))
mesh_base = os.path.splitext(body_mesh)[0]
# mesh_file = mesh_base + ".usd"
# -----------------------------------
# one time conversion, put meshes into local body space (and meter units)
# stage = Usd.Stage.Open("./assets/snu/OBJ/" + mesh_file)
# geom = UsdGeom.Mesh.Get(stage, "/" + mesh_base + "_obj/defaultobject/defaultobject")
# body_X_bs = wp.transform_inverse(body_X_s)
# joint_X_bs = wp.transform_inverse(joint_X_s)
# points = geom.GetPointsAttr().Get()
# for i in range(len(points)):
# p = wp.transform_point(joint_X_bs, points[i]*0.01)
# points[i] = Gf.Vec3f(p.tolist()) # cm -> meters
# geom.GetPointsAttr().Set(points)
# extent = UsdGeom.Boundable.ComputeExtentFromPlugins(geom, 0.0)
# geom.GetExtentAttr().Set(extent)
# stage.Save()
# --------------------------------------
link = -1
if len(filter) == 0 or name in filter:
joint_X_p = wp.transform_multiply(wp.transform_inverse(parent_X_s), joint_X_s)
body_X_c = wp.transform_multiply(wp.transform_inverse(joint_X_s), body_X_s)
if parent_link == -1:
joint_X_p = wp.transform_identity()
# add link
link = builder.add_body(
parent=parent_link,
origin=wp.transform_multiply(root_xform, joint_X_s),
joint_xform=joint_X_p,
joint_axis=joint_axis,
joint_type=joint_type,
joint_target_ke=5.0,
joint_target_kd=2.0,
joint_limit_lower=joint_lower[0],
joint_limit_upper=joint_upper[0],
joint_limit_ke=1.0e3,
joint_limit_kd=1.0e2,
joint_armature=armature,
)
# add shape
builder.add_shape_box(
body=link,
pos=body_X_c.p,
rot=body_X_c.q,
hx=body_size[0] * 0.5,
hy=body_size[1] * 0.5,
hz=body_size[2] * 0.5,
ke=1.0e3 * 5.0,
kd=1.0e2 * 2.0,
kf=1.0e3,
mu=0.5,
)
# add lookup in name->link map
# save parent transform
self.xform_map[name] = joint_X_s
self.node_map[name] = link
self.mesh_map[mesh_base] = link
def parse_muscles(self, filename, builder):
# list of MuscleUnits
muscles = []
file = ET.parse(filename)
root = file.getroot()
self.muscle_start = len(builder.muscle_activation)
for child in root:
if child.tag == "Unit":
unit_name = child.attrib["name"]
unit_f0 = float(child.attrib["f0"])
unit_lm = float(child.attrib["lm"])
unit_lt = float(child.attrib["lt"])
unit_lmax = float(child.attrib["lmax"])
unit_pen = float(child.attrib["pen_angle"])
m = MuscleUnit()
m.name = unit_name
incomplete = False
for waypoint in child.iter("Waypoint"):
way_bone = waypoint.attrib["body"]
way_link = self.node_map[way_bone]
way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32)
if way_link == -1:
incomplete = True
break
# transform loc to joint local space
joint_X_s = self.xform_map[way_bone]
way_loc = wp.transform_point(wp.transform_inverse(joint_X_s), way_loc)
m.bones.append(way_link)
m.points.append(way_loc)
if not incomplete:
muscles.append(m)
builder.add_muscle(
m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen
)
self.muscles = muscles
def parse_snu(root_xform, skeleton_file, muscle_file, builder, filter, armature=0.0):
return Skeleton(root_xform, skeleton_file, muscle_file, builder, filter, armature=armature)
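# Example usage (a minimal sketch; the asset paths are illustrative placeholders, not files
# shipped with this module):
#
#   builder = wp.sim.ModelBuilder()
#   parse_snu(
#       wp.transform_identity(),
#       "assets/snu/human.xml",
#       "assets/snu/muscle.xml",
#       builder,
#       filter=[],
#       armature=0.05,
#   )
#   model = builder.finalize()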
| 8,339 | Python | 36.737556 | 107 | 0.491786 |
NVIDIA/warp/warp/sim/import_mjcf.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import os
import re
import xml.etree.ElementTree as ET
import numpy as np
import warp as wp
def parse_mjcf(
mjcf_filename,
builder,
xform=None,
density=1000.0,
stiffness=0.0,
damping=0.0,
contact_ke=1000.0,
contact_kd=100.0,
contact_kf=100.0,
contact_ka=0.0,
contact_mu=0.5,
contact_restitution=0.5,
contact_thickness=0.0,
limit_ke=100.0,
limit_kd=10.0,
scale=1.0,
armature=0.0,
armature_scale=1.0,
parse_meshes=True,
enable_self_collisions=False,
up_axis="Z",
ignore_classes=None,
collapse_fixed_joints=False,
):
"""
Parses MuJoCo XML (MJCF) file and adds the bodies and joints to the given ModelBuilder.
Args:
mjcf_filename (str): The filename of the MuJoCo file to parse.
builder (ModelBuilder): The :class:`ModelBuilder` to add the bodies and joints to.
xform (:ref:`transform <transform>`): The transform to apply to the imported mechanism.
density (float): The density of the shapes in kg/m^3 which will be used to calculate the body mass and inertia.
stiffness (float): The stiffness of the joints.
damping (float): The damping of the joints.
contact_ke (float): The stiffness of the shape contacts.
contact_kd (float): The damping of the shape contacts.
contact_kf (float): The friction stiffness of the shape contacts.
contact_ka (float): The adhesion distance of the shape contacts.
contact_mu (float): The friction coefficient of the shape contacts.
contact_restitution (float): The restitution coefficient of the shape contacts.
contact_thickness (float): The thickness to add to the shape geometry.
limit_ke (float): The stiffness of the joint limits.
limit_kd (float): The damping of the joint limits.
scale (float): The scaling factor to apply to the imported mechanism.
armature (float): Default joint armature to use if `armature` has not been defined for a joint in the MJCF.
armature_scale (float): Scaling factor to apply to the MJCF-defined joint armature values.
parse_meshes (bool): Whether geometries of type `"mesh"` should be parsed. If False, geometries of type `"mesh"` are ignored.
enable_self_collisions (bool): If True, self-collisions are enabled.
up_axis (str): The up axis of the mechanism. Can be either `"X"`, `"Y"` or `"Z"`. The default is `"Z"`.
ignore_classes (List[str]): A list of regular expressions. Bodies and joints with a class matching one of the regular expressions will be ignored.
collapse_fixed_joints (bool): If True, fixed joints are removed and the respective bodies are merged.
Note:
The inertia and masses of the bodies are calculated from the shape geometry and the given density. The values defined in the MJCF are not respected at the moment.
The handling of advanced features, such as MJCF classes, is still experimental.
"""
if xform is None:
xform = wp.transform()
if ignore_classes is None:
ignore_classes = []
mjcf_dirname = os.path.dirname(mjcf_filename)
file = ET.parse(mjcf_filename)
root = file.getroot()
contact_vars = {
"ke": contact_ke,
"kd": contact_kd,
"kf": contact_kf,
"ka": contact_ka,
"mu": contact_mu,
"restitution": contact_restitution,
"thickness": contact_thickness,
}
use_degrees = True # angles are in degrees by default
euler_seq = [1, 2, 3] # XYZ by default
compiler = root.find("compiler")
if compiler is not None:
use_degrees = compiler.attrib.get("angle", "degree").lower() == "degree"
euler_seq = ["xyz".index(c) + 1 for c in compiler.attrib.get("eulerseq", "xyz").lower()]
mesh_dir = compiler.attrib.get("meshdir", ".")
mesh_assets = {}
for asset in root.findall("asset"):
for mesh in asset.findall("mesh"):
if "file" in mesh.attrib:
fname = os.path.join(mesh_dir, mesh.attrib["file"])
# handle stl relative paths
if not os.path.isabs(fname):
fname = os.path.abspath(os.path.join(mjcf_dirname, fname))
if "name" in mesh.attrib:
mesh_assets[mesh.attrib["name"]] = fname
else:
name = ".".join(os.path.basename(fname).split(".")[:-1])
mesh_assets[name] = fname
class_parent = {}
class_children = {}
class_defaults = {"__all__": {}}
def get_class(element):
return element.get("class", "__all__")
def parse_default(node, parent):
nonlocal class_parent
nonlocal class_children
nonlocal class_defaults
class_name = "__all__"
if "class" in node.attrib:
class_name = node.attrib["class"]
class_parent[class_name] = parent
parent = parent or "__all__"
if parent not in class_children:
class_children[parent] = []
class_children[parent].append(class_name)
if class_name not in class_defaults:
class_defaults[class_name] = {}
for child in node:
if child.tag == "default":
parse_default(child, node.get("class"))
else:
class_defaults[class_name][child.tag] = child.attrib
for default in root.findall("default"):
parse_default(default, None)
def merge_attrib(default_attrib: dict, incoming_attrib: dict):
attrib = default_attrib.copy()
attrib.update(incoming_attrib)
return attrib
if isinstance(up_axis, str):
up_axis = "XYZ".index(up_axis.upper())
sqh = np.sqrt(0.5)
if up_axis == 0:
xform = wp.transform(xform.p, wp.quat(0.0, 0.0, -sqh, sqh) * xform.q)
elif up_axis == 2:
xform = wp.transform(xform.p, wp.quat(sqh, 0.0, 0.0, -sqh) * xform.q)
# do not apply scaling to the root transform
xform = wp.transform(np.array(xform.p) / scale, xform.q)
def parse_float(attrib, key, default):
if key in attrib:
return float(attrib[key])
else:
return default
def parse_vec(attrib, key, default):
if key in attrib:
out = np.fromstring(attrib[key], sep=" ", dtype=np.float32)
else:
out = np.array(default, dtype=np.float32)
length = len(out)
if length == 1:
return wp.vec(len(default), wp.float32)(out[0], out[0], out[0])
return wp.vec(length, wp.float32)(out)
def parse_orientation(attrib):
if "quat" in attrib:
wxyz = np.fromstring(attrib["quat"], sep=" ")
return wp.normalize(wp.quat(*wxyz[1:], wxyz[0]))
if "euler" in attrib:
euler = np.fromstring(attrib["euler"], sep=" ")
if use_degrees:
euler *= np.pi / 180
return wp.quat_from_euler(euler, *euler_seq)
if "axisangle" in attrib:
axisangle = np.fromstring(attrib["axisangle"], sep=" ")
angle = axisangle[3]
if use_degrees:
angle *= np.pi / 180
axis = wp.normalize(wp.vec3(*axisangle[:3]))
return wp.quat_from_axis_angle(axis, angle)
if "xyaxes" in attrib:
xyaxes = np.fromstring(attrib["xyaxes"], sep=" ")
xaxis = wp.normalize(wp.vec3(*xyaxes[:3]))
zaxis = wp.normalize(wp.vec3(*xyaxes[3:]))
yaxis = wp.normalize(wp.cross(zaxis, xaxis))
rot_matrix = np.array([xaxis, yaxis, zaxis]).T
return wp.quat_from_matrix(rot_matrix)
if "zaxis" in attrib:
zaxis = np.fromstring(attrib["zaxis"], sep=" ")
zaxis = wp.normalize(wp.vec3(*zaxis))
xaxis = wp.normalize(wp.cross(wp.vec3(0, 0, 1), zaxis))
yaxis = wp.normalize(wp.cross(zaxis, xaxis))
rot_matrix = np.array([xaxis, yaxis, zaxis]).T
return wp.quat_from_matrix(rot_matrix)
return wp.quat_identity()
def parse_mesh(geom):
import trimesh
faces = []
vertices = []
stl_file = mesh_assets[geom["mesh"]]
m = trimesh.load(stl_file)
for v in m.vertices:
vertices.append(np.array(v) * scale)
for f in m.faces:
faces.append(int(f[0]))
faces.append(int(f[1]))
faces.append(int(f[2]))
return wp.sim.Mesh(vertices, faces), m.scale
def parse_body(body, parent, incoming_defaults: dict):
body_class = body.get("childclass")
if body_class is None:
defaults = incoming_defaults
else:
for pattern in ignore_classes:
if re.match(pattern, body_class):
return
defaults = merge_attrib(incoming_defaults, class_defaults[body_class])
if "body" in defaults:
body_attrib = merge_attrib(defaults["body"], body.attrib)
else:
body_attrib = body.attrib
body_name = body_attrib["name"]
body_pos = parse_vec(body_attrib, "pos", (0.0, 0.0, 0.0))
body_ori = parse_orientation(body_attrib)
if parent == -1:
body_pos = wp.transform_point(xform, body_pos)
body_ori = xform.q * body_ori
body_pos *= scale
joint_armature = []
joint_name = []
joint_pos = []
linear_axes = []
angular_axes = []
joint_type = None
freejoint_tags = body.findall("freejoint")
if len(freejoint_tags) > 0:
joint_type = wp.sim.JOINT_FREE
joint_name.append(freejoint_tags[0].attrib.get("name", f"{body_name}_freejoint"))
else:
joints = body.findall("joint")
for _i, joint in enumerate(joints):
if "joint" in defaults:
joint_attrib = merge_attrib(defaults["joint"], joint.attrib)
else:
joint_attrib = joint.attrib
# default to hinge if not specified
joint_type_str = joint_attrib.get("type", "hinge")
joint_name.append(joint_attrib["name"])
joint_pos.append(parse_vec(joint_attrib, "pos", (0.0, 0.0, 0.0)) * scale)
joint_range = parse_vec(joint_attrib, "range", (-3.0, 3.0))
joint_armature.append(parse_float(joint_attrib, "armature", armature) * armature_scale)
if joint_type_str == "free":
joint_type = wp.sim.JOINT_FREE
break
if joint_type_str == "fixed":
joint_type = wp.sim.JOINT_FIXED
break
is_angular = joint_type_str == "hinge"
mode = wp.sim.JOINT_MODE_FORCE
if stiffness > 0.0 or "stiffness" in joint_attrib:
mode = wp.sim.JOINT_MODE_TARGET_POSITION
axis_vec = parse_vec(joint_attrib, "axis", (0.0, 0.0, 0.0))
ax = wp.sim.model.JointAxis(
axis=axis_vec,
limit_lower=(np.deg2rad(joint_range[0]) if is_angular and use_degrees else joint_range[0]),
limit_upper=(np.deg2rad(joint_range[1]) if is_angular and use_degrees else joint_range[1]),
target_ke=parse_float(joint_attrib, "stiffness", stiffness),
target_kd=parse_float(joint_attrib, "damping", damping),
limit_ke=limit_ke,
limit_kd=limit_kd,
mode=mode,
)
if is_angular:
angular_axes.append(ax)
else:
linear_axes.append(ax)
link = builder.add_body(
origin=wp.transform(body_pos, body_ori), # will be evaluated in fk()
armature=joint_armature[0] if len(joint_armature) > 0 else armature,
name=body_name,
)
if joint_type is None:
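# infer the joint type from the parsed axes: no axes -> fixed, one/two/three hinges ->
# revolute/universal/compound, a single slide -> prismatic, any other combination -> D6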
if len(linear_axes) == 0:
if len(angular_axes) == 0:
joint_type = wp.sim.JOINT_FIXED
elif len(angular_axes) == 1:
joint_type = wp.sim.JOINT_REVOLUTE
elif len(angular_axes) == 2:
joint_type = wp.sim.JOINT_UNIVERSAL
elif len(angular_axes) == 3:
joint_type = wp.sim.JOINT_COMPOUND
elif len(linear_axes) == 1 and len(angular_axes) == 0:
joint_type = wp.sim.JOINT_PRISMATIC
else:
joint_type = wp.sim.JOINT_D6
joint_pos = joint_pos[0] if len(joint_pos) > 0 else (0.0, 0.0, 0.0)
builder.add_joint(
joint_type,
parent,
link,
linear_axes,
angular_axes,
name="_".join(joint_name),
parent_xform=wp.transform(body_pos + joint_pos, body_ori),
child_xform=wp.transform(joint_pos, wp.quat_identity()),
armature=joint_armature[0] if len(joint_armature) > 0 else armature,
)
# -----------------
# add shapes
for geo_count, geom in enumerate(body.findall("geom")):
geom_defaults = defaults
if "class" in geom.attrib:
geom_class = geom.attrib["class"]
ignore_geom = False
for pattern in ignore_classes:
if re.match(pattern, geom_class):
ignore_geom = True
break
if ignore_geom:
continue
if geom_class in class_defaults:
geom_defaults = merge_attrib(defaults, class_defaults[geom_class])
if "geom" in geom_defaults:
geom_attrib = merge_attrib(geom_defaults["geom"], geom.attrib)
else:
geom_attrib = geom.attrib
geom_name = geom_attrib.get("name", f"{body_name}_geom_{geo_count}")
geom_type = geom_attrib.get("type", "sphere")
if "mesh" in geom_attrib:
geom_type = "mesh"
geom_size = parse_vec(geom_attrib, "size", [1.0, 1.0, 1.0]) * scale
geom_pos = parse_vec(geom_attrib, "pos", (0.0, 0.0, 0.0)) * scale
geom_rot = parse_orientation(geom_attrib)
geom_density = parse_float(geom_attrib, "density", density)
if geom_type == "sphere":
builder.add_shape_sphere(
link,
pos=geom_pos,
rot=geom_rot,
radius=geom_size[0],
density=geom_density,
**contact_vars,
)
elif geom_type == "box":
builder.add_shape_box(
link,
pos=geom_pos,
rot=geom_rot,
hx=geom_size[0],
hy=geom_size[1],
hz=geom_size[2],
density=geom_density,
**contact_vars,
)
elif geom_type == "mesh" and parse_meshes:
mesh, _ = parse_mesh(geom_attrib)
if "mesh" in defaults:
mesh_scale = parse_vec(defaults["mesh"], "scale", [1.0, 1.0, 1.0])
else:
mesh_scale = [1.0, 1.0, 1.0]
# as per the Mujoco XML reference, ignore geom size attribute
assert len(geom_size) == 3, "need to specify size for mesh geom"
builder.add_shape_mesh(
body=link,
pos=geom_pos,
rot=geom_rot,
mesh=mesh,
scale=mesh_scale,
density=geom_density,
**contact_vars,
)
elif geom_type in {"capsule", "cylinder"}:
if "fromto" in geom_attrib:
geom_fromto = parse_vec(geom_attrib, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
start = wp.vec3(geom_fromto[0:3]) * scale
end = wp.vec3(geom_fromto[3:6]) * scale
# compute rotation to align the capsule's default axis (y-axis, up_axis=1) with the MJCF fromto direction
axis = wp.normalize(end - start)
angle = math.acos(wp.dot(axis, wp.vec3(0.0, 1.0, 0.0)))
axis = wp.normalize(wp.cross(axis, wp.vec3(0.0, 1.0, 0.0)))
geom_pos = (start + end) * 0.5
geom_rot = wp.quat_from_axis_angle(axis, -angle)
geom_radius = geom_size[0]
geom_height = wp.length(end - start) * 0.5
geom_up_axis = 1
else:
geom_radius = geom_size[0]
geom_height = geom_size[1]
geom_up_axis = up_axis
if geom_type == "cylinder":
builder.add_shape_cylinder(
link,
pos=geom_pos,
rot=geom_rot,
radius=geom_radius,
half_height=geom_height,
density=geom_density,
up_axis=geom_up_axis,
**contact_vars,
)
else:
builder.add_shape_capsule(
link,
pos=geom_pos,
rot=geom_rot,
radius=geom_radius,
half_height=geom_height,
density=geom_density,
up_axis=geom_up_axis,
**contact_vars,
)
else:
print(f"MJCF parsing shape {geom_name} issue: geom type {geom_type} is unsupported")
# -----------------
# recurse
for child in body.findall("body"):
parse_body(child, link, defaults)
# -----------------
# start articulation
start_shape_count = len(builder.shape_geo_type)
builder.add_articulation()
world = root.find("worldbody")
world_class = get_class(world)
world_defaults = merge_attrib(class_defaults["__all__"], class_defaults.get(world_class, {}))
for body in world.findall("body"):
parse_body(body, -1, world_defaults)
end_shape_count = len(builder.shape_geo_type)
if not enable_self_collisions:
for i in range(start_shape_count, end_shape_count):
for j in range(i + 1, end_shape_count):
builder.shape_collision_filter_pairs.add((i, j))
if collapse_fixed_joints:
builder.collapse_fixed_joints()
NVIDIA/warp/warp/sim/integrator_xpbd.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
from .integrator import Integrator
from .model import (
JOINT_MODE_FORCE,
JOINT_MODE_TARGET_POSITION,
JOINT_MODE_TARGET_VELOCITY,
PARTICLE_FLAG_ACTIVE,
Control,
Model,
ModelShapeMaterials,
State,
)
from .utils import vec_abs, vec_leaky_max, vec_leaky_min, vec_max, vec_min, velocity_at_point
@wp.kernel
def solve_particle_ground_contacts(
particle_x: wp.array(dtype=wp.vec3),
particle_v: wp.array(dtype=wp.vec3),
invmass: wp.array(dtype=float),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
ke: float,
kd: float,
kf: float,
mu: float,
ground: wp.array(dtype=float),
dt: float,
relaxation: float,
delta: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
if (particle_flags[tid] & PARTICLE_FLAG_ACTIVE) == 0:
return
wi = invmass[tid]
if wi == 0.0:
return
x = particle_x[tid]
v = particle_v[tid]
n = wp.vec3(ground[0], ground[1], ground[2])
c = wp.min(wp.dot(n, x) + ground[3] - particle_radius[tid], 0.0)
if c > 0.0:
return
# normal
lambda_n = c
delta_n = n * lambda_n
# friction
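# PBD-style friction: the tangential correction below is clamped by mu times the normal
# correction and by the tangential displacement |vt| * dt accumulated over this substep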
vn = wp.dot(n, v)
vt = v - n * vn
lambda_f = wp.max(mu * lambda_n, -wp.length(vt) * dt)
delta_f = wp.normalize(vt) * lambda_f
wp.atomic_add(delta, tid, (delta_f - delta_n) * relaxation)
@wp.kernel
def apply_particle_shape_restitution(
particle_x_new: wp.array(dtype=wp.vec3),
particle_v_new: wp.array(dtype=wp.vec3),
particle_x_old: wp.array(dtype=wp.vec3),
particle_v_old: wp.array(dtype=wp.vec3),
particle_invmass: wp.array(dtype=float),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
body_m_inv: wp.array(dtype=float),
body_I_inv: wp.array(dtype=wp.mat33),
shape_body: wp.array(dtype=int),
shape_materials: ModelShapeMaterials,
particle_ka: float,
restitution: float,
contact_count: wp.array(dtype=int),
contact_particle: wp.array(dtype=int),
contact_shape: wp.array(dtype=int),
contact_body_pos: wp.array(dtype=wp.vec3),
contact_body_vel: wp.array(dtype=wp.vec3),
contact_normal: wp.array(dtype=wp.vec3),
contact_max: int,
dt: float,
relaxation: float,
particle_v_out: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
count = min(contact_max, contact_count[0])
if tid >= count:
return
shape_index = contact_shape[tid]
body_index = shape_body[shape_index]
particle_index = contact_particle[tid]
if (particle_flags[particle_index] & PARTICLE_FLAG_ACTIVE) == 0:
return
# x_new = particle_x_new[particle_index]
v_new = particle_v_new[particle_index]
px = particle_x_old[particle_index]
v_old = particle_v_old[particle_index]
X_wb = wp.transform_identity()
# X_com = wp.vec3()
if body_index >= 0:
X_wb = body_q[body_index]
# X_com = body_com[body_index]
# body position in world space
bx = wp.transform_point(X_wb, contact_body_pos[tid])
# r = bx - wp.transform_point(X_wb, X_com)
n = contact_normal[tid]
c = wp.dot(n, px - bx) - particle_radius[particle_index]
if c > particle_ka:
return
rel_vel_old = wp.dot(n, v_old)
rel_vel_new = wp.dot(n, v_new)
if rel_vel_old < 0.0:
# dv = -n * wp.max(-rel_vel_new + wp.max(-restitution * rel_vel_old, 0.0), 0.0)
dv = n * (-rel_vel_new + wp.max(-restitution * rel_vel_old, 0.0))
# compute inverse masses
# w1 = particle_invmass[particle_index]
# w2 = 0.0
# if body_index >= 0:
# angular = wp.cross(r, n)
# q = wp.transform_get_rotation(X_wb)
# rot_angular = wp.quat_rotate_inv(q, angular)
# I_inv = body_I_inv[body_index]
# w2 = body_m_inv[body_index] + wp.dot(rot_angular, I_inv * rot_angular)
# denom = w1 + w2
# if denom == 0.0:
# return
wp.atomic_add(particle_v_out, tid, dv)
@wp.kernel
def apply_particle_ground_restitution(
particle_x_new: wp.array(dtype=wp.vec3),
particle_v_new: wp.array(dtype=wp.vec3),
particle_x_old: wp.array(dtype=wp.vec3),
particle_v_old: wp.array(dtype=wp.vec3),
particle_invmass: wp.array(dtype=float),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
particle_ka: float,
restitution: float,
ground: wp.array(dtype=float),
dt: float,
relaxation: float,
particle_v_out: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
if (particle_flags[tid] & PARTICLE_FLAG_ACTIVE) == 0:
return
wi = particle_invmass[tid]
if wi == 0.0:
return
x = particle_x_old[tid]
v_old = particle_v_old[tid]
v_new = particle_v_new[tid]
n = wp.vec3(ground[0], ground[1], ground[2])
c = wp.dot(n, x) + ground[3] - particle_radius[tid]
if c > particle_ka:
return
vn = wp.dot(n, v_old)
vn_new = wp.dot(n, v_new)
if vn < 0.0:
dv = n * (-vn_new + wp.max(-restitution * vn, 0.0))
wp.atomic_add(particle_v_out, tid, dv)
@wp.kernel
def solve_particle_shape_contacts(
particle_x: wp.array(dtype=wp.vec3),
particle_v: wp.array(dtype=wp.vec3),
particle_invmass: wp.array(dtype=float),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
body_m_inv: wp.array(dtype=float),
body_I_inv: wp.array(dtype=wp.mat33),
shape_body: wp.array(dtype=int),
shape_materials: ModelShapeMaterials,
particle_mu: float,
particle_ka: float,
contact_count: wp.array(dtype=int),
contact_particle: wp.array(dtype=int),
contact_shape: wp.array(dtype=int),
contact_body_pos: wp.array(dtype=wp.vec3),
contact_body_vel: wp.array(dtype=wp.vec3),
contact_normal: wp.array(dtype=wp.vec3),
contact_max: int,
dt: float,
relaxation: float,
# outputs
delta: wp.array(dtype=wp.vec3),
body_delta: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
count = min(contact_max, contact_count[0])
if tid >= count:
return
shape_index = contact_shape[tid]
body_index = shape_body[shape_index]
particle_index = contact_particle[tid]
if (particle_flags[particle_index] & PARTICLE_FLAG_ACTIVE) == 0:
return
px = particle_x[particle_index]
pv = particle_v[particle_index]
X_wb = wp.transform_identity()
X_com = wp.vec3()
if body_index >= 0:
X_wb = body_q[body_index]
X_com = body_com[body_index]
# body position in world space
bx = wp.transform_point(X_wb, contact_body_pos[tid])
r = bx - wp.transform_point(X_wb, X_com)
n = contact_normal[tid]
c = wp.dot(n, px - bx) - particle_radius[particle_index]
if c > particle_ka:
return
# take average material properties of shape and particle parameters
mu = 0.5 * (particle_mu + shape_materials.mu[shape_index])
# body velocity
body_v_s = wp.spatial_vector()
if body_index >= 0:
body_v_s = body_qd[body_index]
body_w = wp.spatial_top(body_v_s)
body_v = wp.spatial_bottom(body_v_s)
# compute the body velocity at the particle position
bv = body_v + wp.cross(body_w, r) + wp.transform_vector(X_wb, contact_body_vel[tid])
# relative velocity
v = pv - bv
# normal
lambda_n = c
delta_n = n * lambda_n
# friction
vn = wp.dot(n, v)
vt = v - n * vn
# compute inverse masses
w1 = particle_invmass[particle_index]
w2 = 0.0
if body_index >= 0:
angular = wp.cross(r, n)
q = wp.transform_get_rotation(X_wb)
rot_angular = wp.quat_rotate_inv(q, angular)
I_inv = body_I_inv[body_index]
w2 = body_m_inv[body_index] + wp.dot(rot_angular, I_inv * rot_angular)
denom = w1 + w2
if denom == 0.0:
return
lambda_f = wp.max(mu * lambda_n, -wp.length(vt) * dt)
delta_f = wp.normalize(vt) * lambda_f
delta_total = (delta_f - delta_n) / denom * relaxation
wp.atomic_add(delta, particle_index, w1 * delta_total)
if body_index >= 0:
delta_t = wp.cross(r, delta_total)
wp.atomic_sub(body_delta, body_index, wp.spatial_vector(delta_t, delta_total))
@wp.kernel
def solve_particle_particle_contacts(
grid: wp.uint64,
particle_x: wp.array(dtype=wp.vec3),
particle_v: wp.array(dtype=wp.vec3),
particle_invmass: wp.array(dtype=float),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
k_mu: float,
k_cohesion: float,
max_radius: float,
dt: float,
relaxation: float,
# outputs
deltas: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
# order threads by cell
i = wp.hash_grid_point_id(grid, tid)
if i == -1:
# hash grid has not been built yet
return
if (particle_flags[i] & PARTICLE_FLAG_ACTIVE) == 0:
return
x = particle_x[i]
v = particle_v[i]
radius = particle_radius[i]
w1 = particle_invmass[i]
# particle contact
query = wp.hash_grid_query(grid, x, radius + max_radius + k_cohesion)
index = int(0)
delta = wp.vec3(0.0)
while wp.hash_grid_query_next(query, index):
if (particle_flags[index] & PARTICLE_FLAG_ACTIVE) != 0 and index != i:
# compute distance to point
n = x - particle_x[index]
d = wp.length(n)
err = d - radius - particle_radius[index]
# compute inverse masses
w2 = particle_invmass[index]
denom = w1 + w2
if err <= k_cohesion and denom > 0.0:
n = n / d
vrel = v - particle_v[index]
# normal
lambda_n = err
delta_n = n * lambda_n
# friction
vn = wp.dot(n, vrel)
vt = v - n * vn
lambda_f = wp.max(k_mu * lambda_n, -wp.length(vt) * dt)
delta_f = wp.normalize(vt) * lambda_f
delta += (delta_f - delta_n) / denom
wp.atomic_add(deltas, i, delta * w1 * relaxation)
@wp.kernel
def solve_springs(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
invmass: wp.array(dtype=float),
spring_indices: wp.array(dtype=int),
spring_rest_lengths: wp.array(dtype=float),
spring_stiffness: wp.array(dtype=float),
spring_damping: wp.array(dtype=float),
dt: float,
lambdas: wp.array(dtype=float),
delta: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
i = spring_indices[tid * 2 + 0]
j = spring_indices[tid * 2 + 1]
ke = spring_stiffness[tid]
kd = spring_damping[tid]
rest = spring_rest_lengths[tid]
xi = x[i]
xj = x[j]
vi = v[i]
vj = v[j]
xij = xi - xj
vij = vi - vj
l = wp.length(xij)
if l == 0.0:
return
n = xij / l
c = l - rest
grad_c_xi = n
grad_c_xj = -1.0 * n
wi = invmass[i]
wj = invmass[j]
denom = wi + wj
# Note strict inequality for damping -- 0 damping is ok
if denom <= 0.0 or ke <= 0.0 or kd < 0.0:
return
alpha = 1.0 / (ke * dt * dt)
gamma = kd / (ke * dt)
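# XPBD compliant constraint update (Macklin et al., "XPBD: Position-Based Simulation of
# Compliant Constrained Dynamics"): alpha is the compliance scaled by 1/dt^2, gamma adds
# constraint damping, and dlambda below is the resulting Lagrange multiplier increment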
grad_c_dot_v = dt * wp.dot(grad_c_xi, vij) # Note: dt because from the paper we want x_i - x^n, not v...
dlambda = -1.0 * (c + alpha * lambdas[tid] + gamma * grad_c_dot_v) / ((1.0 + gamma) * denom + alpha)
dxi = wi * dlambda * grad_c_xi
dxj = wj * dlambda * grad_c_xj
lambdas[tid] = lambdas[tid] + dlambda
wp.atomic_add(delta, i, dxi)
wp.atomic_add(delta, j, dxj)
@wp.kernel
def bending_constraint(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
invmass: wp.array(dtype=float),
indices: wp.array2d(dtype=int),
rest: wp.array(dtype=float),
bending_properties: wp.array2d(dtype=float),
dt: float,
lambdas: wp.array(dtype=float),
delta: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
eps = 1.0e-6
ke = bending_properties[tid, 0]
kd = bending_properties[tid, 1]
i = indices[tid, 0]
j = indices[tid, 1]
k = indices[tid, 2]
l = indices[tid, 3]
if i == -1 or j == -1 or k == -1 or l == -1:
return
rest_angle = rest[tid]
x1 = x[i]
x2 = x[j]
x3 = x[k]
x4 = x[l]
v1 = v[i]
v2 = v[j]
v3 = v[k]
v4 = v[l]
w1 = invmass[i]
w2 = invmass[j]
w3 = invmass[k]
w4 = invmass[l]
n1 = wp.cross(x3 - x1, x4 - x1) # normal to face 1
n2 = wp.cross(x4 - x2, x3 - x2) # normal to face 2
n1_length = wp.length(n1)
n2_length = wp.length(n2)
if n1_length < eps or n2_length < eps:
return
n1 /= n1_length
n2 /= n2_length
cos_theta = wp.dot(n1, n2)
e = x4 - x3
e_hat = wp.normalize(e)
e_length = wp.length(e)
derivative_flip = wp.sign(wp.dot(wp.cross(n1, n2), e))
derivative_flip *= -1.0
angle = wp.acos(cos_theta)
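# 'angle' is the dihedral angle between the triangles (x1, x3, x4) and (x2, x4, x3) that
# share the edge x3-x4; the constraint drives it towards the rest angle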
grad_x1 = n1 * e_length * derivative_flip
grad_x2 = n2 * e_length * derivative_flip
grad_x3 = (n1 * wp.dot(x1 - x4, e_hat) + n2 * wp.dot(x2 - x4, e_hat)) * derivative_flip
grad_x4 = (n1 * wp.dot(x3 - x1, e_hat) + n2 * wp.dot(x3 - x2, e_hat)) * derivative_flip
c = angle - rest_angle
denominator = (
w1 * wp.length_sq(grad_x1)
+ w2 * wp.length_sq(grad_x2)
+ w3 * wp.length_sq(grad_x3)
+ w4 * wp.length_sq(grad_x4)
)
# Note strict inequality for damping -- 0 damping is ok
if denominator <= 0.0 or ke <= 0.0 or kd < 0.0:
return
alpha = 1.0 / (ke * dt * dt)
gamma = kd / (ke * dt)
grad_dot_v = dt * (wp.dot(grad_x1, v1) + wp.dot(grad_x2, v2) + wp.dot(grad_x3, v3) + wp.dot(grad_x4, v4))
dlambda = -1.0 * (c + alpha * lambdas[tid] + gamma * grad_dot_v) / ((1.0 + gamma) * denominator + alpha)
delta0 = w1 * dlambda * grad_x1
delta1 = w2 * dlambda * grad_x2
delta2 = w3 * dlambda * grad_x3
delta3 = w4 * dlambda * grad_x4
lambdas[tid] = lambdas[tid] + dlambda
wp.atomic_add(delta, i, delta0)
wp.atomic_add(delta, j, delta1)
wp.atomic_add(delta, k, delta2)
wp.atomic_add(delta, l, delta3)
@wp.kernel
def solve_tetrahedra(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
inv_mass: wp.array(dtype=float),
indices: wp.array(dtype=int, ndim=2),
rest_matrix: wp.array(dtype=wp.mat33),
activation: wp.array(dtype=float),
materials: wp.array(dtype=float, ndim=2),
dt: float,
relaxation: float,
delta: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
i = indices[tid, 0]
j = indices[tid, 1]
k = indices[tid, 2]
l = indices[tid, 3]
# act = activation[tid]
# k_mu = materials[tid, 0]
# k_lambda = materials[tid, 1]
# k_damp = materials[tid, 2]
x0 = x[i]
x1 = x[j]
x2 = x[k]
x3 = x[l]
# v0 = v[i]
# v1 = v[j]
# v2 = v[k]
# v3 = v[l]
w0 = inv_mass[i]
w1 = inv_mass[j]
w2 = inv_mass[k]
w3 = inv_mass[l]
x10 = x1 - x0
x20 = x2 - x0
x30 = x3 - x0
Ds = wp.mat33(x10, x20, x30)
Dm = rest_matrix[tid]
inv_QT = wp.transpose(Dm)
inv_rest_volume = wp.determinant(Dm) * 6.0
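# Dm is the inverse of the rest-state edge matrix, so det(Dm) * 6 equals the inverse
# rest volume of the tetrahedron (rest volume = det(rest edges) / 6)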
# F = Xs*Xm^-1
F = Ds * Dm
f1 = wp.vec3(F[0, 0], F[1, 0], F[2, 0])
f2 = wp.vec3(F[0, 1], F[1, 1], F[2, 1])
f3 = wp.vec3(F[0, 2], F[1, 2], F[2, 2])
tr = wp.dot(f1, f1) + wp.dot(f2, f2) + wp.dot(f3, f3)
C = float(0.0)
dC = wp.mat33(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
compliance = float(0.0)
stretching_compliance = relaxation
volume_compliance = relaxation
num_terms = 2
for term in range(0, num_terms):
if term == 0:
# deviatoric, stable
C = tr - 3.0
dC = F * 2.0
compliance = stretching_compliance
elif term == 1:
# volume conservation
C = wp.determinant(F) - 1.0
dC = wp.mat33(wp.cross(f2, f3), wp.cross(f3, f1), wp.cross(f1, f2))
compliance = volume_compliance
if C != 0.0:
dP = dC * inv_QT
grad1 = wp.vec3(dP[0][0], dP[1][0], dP[2][0])
grad2 = wp.vec3(dP[0][1], dP[1][1], dP[2][1])
grad3 = wp.vec3(dP[0][2], dP[1][2], dP[2][2])
grad0 = -grad1 - grad2 - grad3
w = (
wp.dot(grad0, grad0) * w0
+ wp.dot(grad1, grad1) * w1
+ wp.dot(grad2, grad2) * w2
+ wp.dot(grad3, grad3) * w3
)
if w > 0.0:
alpha = compliance / dt / dt
if inv_rest_volume > 0.0:
alpha *= inv_rest_volume
dlambda = -C / (w + alpha)
wp.atomic_add(delta, i, w0 * dlambda * grad0)
wp.atomic_add(delta, j, w1 * dlambda * grad1)
wp.atomic_add(delta, k, w2 * dlambda * grad2)
wp.atomic_add(delta, l, w3 * dlambda * grad3)
# wp.atomic_add(particle.num_corr, id0, 1)
# wp.atomic_add(particle.num_corr, id1, 1)
# wp.atomic_add(particle.num_corr, id2, 1)
# wp.atomic_add(particle.num_corr, id3, 1)
# C_Spherical
# r_s = wp.sqrt(wp.dot(f1, f1) + wp.dot(f2, f2) + wp.dot(f3, f3))
# r_s_inv = 1.0/r_s
# C = r_s - wp.sqrt(3.0)
# dCdx = F*wp.transpose(Dm)*r_s_inv
# alpha = 1.0
# C_D
# r_s = wp.sqrt(wp.dot(f1, f1) + wp.dot(f2, f2) + wp.dot(f3, f3))
# C = r_s*r_s - 3.0
# dCdx = F*wp.transpose(Dm)*2.0
# alpha = 1.0
# grad1 = wp.vec3(dCdx[0, 0], dCdx[1, 0], dCdx[2, 0])
# grad2 = wp.vec3(dCdx[0, 1], dCdx[1, 1], dCdx[2, 1])
# grad3 = wp.vec3(dCdx[0, 2], dCdx[1, 2], dCdx[2, 2])
# grad0 = (grad1 + grad2 + grad3) * (0.0 - 1.0)
# denom = (
# wp.dot(grad0, grad0) * w0 + wp.dot(grad1, grad1) * w1 + wp.dot(grad2, grad2) * w2 + wp.dot(grad3, grad3) * w3
# )
# multiplier = C / (denom + 1.0 / (k_mu * dt * dt * rest_volume))
# delta0 = grad0 * multiplier
# delta1 = grad1 * multiplier
# delta2 = grad2 * multiplier
# delta3 = grad3 * multiplier
# # hydrostatic part
# J = wp.determinant(F)
# C_vol = J - alpha
# # dCdx = wp.mat33(wp.cross(f2, f3), wp.cross(f3, f1), wp.cross(f1, f2))*wp.transpose(Dm)
# # grad1 = wp.vec3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# # grad2 = wp.vec3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# # grad3 = wp.vec3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# # grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)
# s = inv_rest_volume / 6.0
# grad1 = wp.cross(x20, x30) * s
# grad2 = wp.cross(x30, x10) * s
# grad3 = wp.cross(x10, x20) * s
# grad0 = -(grad1 + grad2 + grad3)
# denom = (
# wp.dot(grad0, grad0) * w0 + wp.dot(grad1, grad1) * w1 + wp.dot(grad2, grad2) * w2 + wp.dot(grad3, grad3) * w3
# )
# multiplier = C_vol / (denom + 1.0 / (k_lambda * dt * dt * rest_volume))
# delta0 += grad0 * multiplier
# delta1 += grad1 * multiplier
# delta2 += grad2 * multiplier
# delta3 += grad3 * multiplier
# # # apply forces
# # wp.atomic_sub(delta, i, delta0 * w0 * relaxation)
# # wp.atomic_sub(delta, j, delta1 * w1 * relaxation)
# # wp.atomic_sub(delta, k, delta2 * w2 * relaxation)
# # wp.atomic_sub(delta, l, delta3 * w3 * relaxation)
@wp.kernel
def solve_tetrahedra2(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
inv_mass: wp.array(dtype=float),
indices: wp.array(dtype=int, ndim=2),
pose: wp.array(dtype=wp.mat33),
activation: wp.array(dtype=float),
materials: wp.array(dtype=float, ndim=2),
dt: float,
relaxation: float,
delta: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
i = indices[tid, 0]
j = indices[tid, 1]
k = indices[tid, 2]
l = indices[tid, 3]
# act = activation[tid]
k_mu = materials[tid, 0]
k_lambda = materials[tid, 1]
# k_damp = materials[tid, 2]
x0 = x[i]
x1 = x[j]
x2 = x[k]
x3 = x[l]
# v0 = v[i]
# v1 = v[j]
# v2 = v[k]
# v3 = v[l]
w0 = inv_mass[i]
w1 = inv_mass[j]
w2 = inv_mass[k]
w3 = inv_mass[l]
x10 = x1 - x0
x20 = x2 - x0
x30 = x3 - x0
Ds = wp.mat33(x10, x20, x30)
Dm = pose[tid]
inv_rest_volume = wp.determinant(Dm) * 6.0
rest_volume = 1.0 / inv_rest_volume
# F = Xs*Xm^-1
F = Ds * Dm
f1 = wp.vec3(F[0, 0], F[1, 0], F[2, 0])
f2 = wp.vec3(F[0, 1], F[1, 1], F[2, 1])
f3 = wp.vec3(F[0, 2], F[1, 2], F[2, 2])
# C_sqrt
# tr = wp.dot(f1, f1) + wp.dot(f2, f2) + wp.dot(f3, f3)
# r_s = wp.sqrt(abs(tr - 3.0))
# C = r_s
# if (r_s == 0.0):
# return
# if (tr < 3.0):
# r_s = 0.0 - r_s
# dCdx = F*wp.transpose(Dm)*(1.0/r_s)
# alpha = 1.0 + k_mu / k_lambda
# C_Neo
r_s = wp.sqrt(wp.dot(f1, f1) + wp.dot(f2, f2) + wp.dot(f3, f3))
if r_s == 0.0:
return
# tr = wp.dot(f1, f1) + wp.dot(f2, f2) + wp.dot(f3, f3)
# if (tr < 3.0):
# r_s = -r_s
r_s_inv = 1.0 / r_s
C = r_s
dCdx = F * wp.transpose(Dm) * r_s_inv
alpha = 1.0 + k_mu / k_lambda
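# Neo-Hookean XPBD constraints (cf. Macklin & Mueller, "A Constraint-based Formulation of
# Stable Neo-Hookean Materials"): deviatoric constraint C = |F|_F above, hydrostatic
# constraint C = det(F) - alpha below, with alpha = 1 + k_mu / k_lambda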
# C_Spherical
# r_s = wp.sqrt(wp.dot(f1, f1) + wp.dot(f2, f2) + wp.dot(f3, f3))
# r_s_inv = 1.0/r_s
# C = r_s - wp.sqrt(3.0)
# dCdx = F*wp.transpose(Dm)*r_s_inv
# alpha = 1.0
# C_D
# r_s = wp.sqrt(wp.dot(f1, f1) + wp.dot(f2, f2) + wp.dot(f3, f3))
# C = r_s*r_s - 3.0
# dCdx = F*wp.transpose(Dm)*2.0
# alpha = 1.0
grad1 = wp.vec3(dCdx[0, 0], dCdx[1, 0], dCdx[2, 0])
grad2 = wp.vec3(dCdx[0, 1], dCdx[1, 1], dCdx[2, 1])
grad3 = wp.vec3(dCdx[0, 2], dCdx[1, 2], dCdx[2, 2])
grad0 = -(grad1 + grad2 + grad3)
denom = (
wp.dot(grad0, grad0) * w0 + wp.dot(grad1, grad1) * w1 + wp.dot(grad2, grad2) * w2 + wp.dot(grad3, grad3) * w3
)
multiplier = C / (denom + 1.0 / (k_mu * dt * dt * rest_volume))
delta0 = grad0 * multiplier
delta1 = grad1 * multiplier
delta2 = grad2 * multiplier
delta3 = grad3 * multiplier
# hydrostatic part
J = wp.determinant(F)
C_vol = J - alpha
# dCdx = wp.mat33(wp.cross(f2, f3), wp.cross(f3, f1), wp.cross(f1, f2))*wp.transpose(Dm)
# grad1 = wp.vec3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = wp.vec3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = wp.vec3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)
s = inv_rest_volume / 6.0
grad1 = wp.cross(x20, x30) * s
grad2 = wp.cross(x30, x10) * s
grad3 = wp.cross(x10, x20) * s
grad0 = -(grad1 + grad2 + grad3)
denom = (
wp.dot(grad0, grad0) * w0 + wp.dot(grad1, grad1) * w1 + wp.dot(grad2, grad2) * w2 + wp.dot(grad3, grad3) * w3
)
multiplier = C_vol / (denom + 1.0 / (k_lambda * dt * dt * rest_volume))
delta0 += grad0 * multiplier
delta1 += grad1 * multiplier
delta2 += grad2 * multiplier
delta3 += grad3 * multiplier
# apply forces
wp.atomic_sub(delta, i, delta0 * w0 * relaxation)
wp.atomic_sub(delta, j, delta1 * w1 * relaxation)
wp.atomic_sub(delta, k, delta2 * w2 * relaxation)
wp.atomic_sub(delta, l, delta3 * w3 * relaxation)
@wp.kernel
def apply_particle_deltas(
x_orig: wp.array(dtype=wp.vec3),
x_pred: wp.array(dtype=wp.vec3),
particle_flags: wp.array(dtype=wp.uint32),
delta: wp.array(dtype=wp.vec3),
dt: float,
v_max: float,
x_out: wp.array(dtype=wp.vec3),
v_out: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
if (particle_flags[tid] & PARTICLE_FLAG_ACTIVE) == 0:
return
x0 = x_orig[tid]
xp = x_pred[tid]
# constraint deltas
d = delta[tid]
x_new = xp + d
v_new = (x_new - x0) / dt
# enforce velocity limit to prevent instability
v_new_mag = wp.length(v_new)
if v_new_mag > v_max:
v_new *= v_max / v_new_mag
x_out[tid] = x_new
v_out[tid] = v_new
@wp.kernel
def apply_body_deltas(
q_in: wp.array(dtype=wp.transform),
qd_in: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
body_I: wp.array(dtype=wp.mat33),
body_inv_m: wp.array(dtype=float),
body_inv_I: wp.array(dtype=wp.mat33),
deltas: wp.array(dtype=wp.spatial_vector),
constraint_inv_weights: wp.array(dtype=float),
dt: float,
# outputs
q_out: wp.array(dtype=wp.transform),
qd_out: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
inv_m = body_inv_m[tid]
if inv_m == 0.0:
q_out[tid] = q_in[tid]
qd_out[tid] = qd_in[tid]
return
inv_I = body_inv_I[tid]
tf = q_in[tid]
delta = deltas[tid]
p0 = wp.transform_get_translation(tf)
q0 = wp.transform_get_rotation(tf)
weight = 1.0
if constraint_inv_weights:
inv_weight = constraint_inv_weights[tid]
if inv_weight > 0.0:
weight = 1.0 / inv_weight
dp = wp.spatial_bottom(delta) * (inv_m * weight)
dq = wp.spatial_top(delta) * weight
dq = wp.quat_rotate(q0, inv_I * wp.quat_rotate_inv(q0, dq))
# update orientation
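# first-order quaternion integration of the angular increment: q1 = q0 + 0.5 * dt * quat(dq, 0) * q0,
# renormalized below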
q1 = q0 + 0.5 * wp.quat(dq * dt, 0.0) * q0
q1 = wp.normalize(q1)
# update position
com = body_com[tid]
x_com = p0 + wp.quat_rotate(q0, com)
p1 = x_com + dp * dt
p1 -= wp.quat_rotate(q1, com)
q_out[tid] = wp.transform(p1, q1)
v0 = wp.spatial_bottom(qd_in[tid])
w0 = wp.spatial_top(qd_in[tid])
# update linear and angular velocity
v1 = v0 + dp
# angular part (compute in body frame)
wb = wp.quat_rotate_inv(q0, w0 + dq)
tb = -wp.cross(wb, body_I[tid] * wb)  # gyroscopic torque (omega x I*omega)
w1 = wp.quat_rotate(q0, wb + inv_I * tb * dt)
# XXX this improves gradient stability
if wp.length(v1) < 1e-4:
v1 = wp.vec3(0.0)
if wp.length(w1) < 1e-4:
w1 = wp.vec3(0.0)
qd_out[tid] = wp.spatial_vector(w1, v1)
@wp.kernel
def apply_body_delta_velocities(
deltas: wp.array(dtype=wp.spatial_vector),
qd_out: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
wp.atomic_add(qd_out, tid, deltas[tid])
@wp.kernel
def apply_joint_torques(
body_q: wp.array(dtype=wp.transform),
body_com: wp.array(dtype=wp.vec3),
joint_q_start: wp.array(dtype=int),
joint_qd_start: wp.array(dtype=int),
joint_type: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
joint_axis: wp.array(dtype=wp.vec3),
joint_axis_mode: wp.array(dtype=int),
joint_act: wp.array(dtype=float),
body_f: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
type = joint_type[tid]
if type == wp.sim.JOINT_FIXED:
return
if type == wp.sim.JOINT_FREE:
return
if type == wp.sim.JOINT_DISTANCE:
return
if type == wp.sim.JOINT_BALL:
return
# rigid body indices of the child and parent
id_c = joint_child[tid]
id_p = joint_parent[tid]
X_pj = joint_X_p[tid]
# X_cj = joint_X_c[tid]
X_wp = X_pj
pose_p = X_pj
com_p = wp.vec3(0.0)
# parent transform and moment arm
if id_p >= 0:
pose_p = body_q[id_p]
X_wp = pose_p * X_wp
com_p = body_com[id_p]
r_p = wp.transform_get_translation(X_wp) - wp.transform_point(pose_p, com_p)
# child transform and moment arm
pose_c = body_q[id_c]
X_wc = pose_c
com_c = body_com[id_c]
r_c = wp.transform_get_translation(X_wc) - wp.transform_point(pose_c, com_c)
# # local joint rotations
# q_p = wp.transform_get_rotation(X_wp)
# q_c = wp.transform_get_rotation(X_wc)
# joint properties (for 1D joints)
# q_start = joint_q_start[tid]
qd_start = joint_qd_start[tid]
axis_start = joint_axis_start[tid]
lin_axis_count = joint_axis_dim[tid, 0]
ang_axis_count = joint_axis_dim[tid, 1]
# total force/torque on the parent
t_total = wp.vec3()
f_total = wp.vec3()
# handle angular constraints
if type == wp.sim.JOINT_REVOLUTE:
mode = joint_axis_mode[axis_start]
if mode == wp.sim.JOINT_MODE_FORCE:
axis = joint_axis[axis_start]
act = joint_act[qd_start]
a_p = wp.transform_vector(X_wp, axis)
t_total += act * a_p
elif type == wp.sim.JOINT_PRISMATIC:
mode = joint_axis_mode[axis_start]
if mode == wp.sim.JOINT_MODE_FORCE:
axis = joint_axis[axis_start]
act = joint_act[qd_start]
a_p = wp.transform_vector(X_wp, axis)
f_total += act * a_p
elif type == wp.sim.JOINT_COMPOUND:
# q_off = wp.transform_get_rotation(X_cj)
# q_pc = wp.quat_inverse(q_off)*wp.quat_inverse(q_p)*q_c*q_off
# # decompose to a compound rotation each axis
# angles = quat_decompose(q_pc)
# # reconstruct rotation axes
# axis_0 = wp.vec3(1.0, 0.0, 0.0)
# q_0 = wp.quat_from_axis_angle(axis_0, angles[0])
# axis_1 = wp.quat_rotate(q_0, wp.vec3(0.0, 1.0, 0.0))
# q_1 = wp.quat_from_axis_angle(axis_1, angles[1])
# axis_2 = wp.quat_rotate(q_1*q_0, wp.vec3(0.0, 0.0, 1.0))
# q_w = q_p*q_off
# t_total += joint_act[qd_start+0] * wp.quat_rotate(q_w, axis_0)
# t_total += joint_act[qd_start+1] * wp.quat_rotate(q_w, axis_1)
# t_total += joint_act[qd_start+2] * wp.quat_rotate(q_w, axis_2)
if joint_axis_mode[axis_start + 0] == wp.sim.JOINT_MODE_FORCE:
axis_0 = joint_axis[axis_start + 0]
t_total += joint_act[qd_start + 0] * wp.transform_vector(X_wp, axis_0)
if joint_axis_mode[axis_start + 1] == wp.sim.JOINT_MODE_FORCE:
axis_1 = joint_axis[axis_start + 1]
t_total += joint_act[qd_start + 1] * wp.transform_vector(X_wp, axis_1)
if joint_axis_mode[axis_start + 2] == wp.sim.JOINT_MODE_FORCE:
axis_2 = joint_axis[axis_start + 2]
t_total += joint_act[qd_start + 2] * wp.transform_vector(X_wp, axis_2)
elif type == wp.sim.JOINT_UNIVERSAL:
# q_off = wp.transform_get_rotation(X_cj)
# q_pc = wp.quat_inverse(q_off)*wp.quat_inverse(q_p)*q_c*q_off
# # decompose to a compound rotation each axis
# angles = quat_decompose(q_pc)
# # reconstruct rotation axes
# axis_0 = wp.vec3(1.0, 0.0, 0.0)
# q_0 = wp.quat_from_axis_angle(axis_0, angles[0])
# axis_1 = wp.quat_rotate(q_0, wp.vec3(0.0, 1.0, 0.0))
# q_1 = wp.quat_from_axis_angle(axis_1, angles[1])
# axis_2 = wp.quat_rotate(q_1*q_0, wp.vec3(0.0, 0.0, 1.0))
# q_w = q_p*q_off
# free axes
# t_total += joint_act[qd_start+0] * wp.quat_rotate(q_w, axis_0)
# t_total += joint_act[qd_start+1] * wp.quat_rotate(q_w, axis_1)
if joint_axis_mode[axis_start + 0] == wp.sim.JOINT_MODE_FORCE:
axis_0 = joint_axis[axis_start + 0]
t_total += joint_act[qd_start + 0] * wp.transform_vector(X_wp, axis_0)
if joint_axis_mode[axis_start + 1] == wp.sim.JOINT_MODE_FORCE:
axis_1 = joint_axis[axis_start + 1]
t_total += joint_act[qd_start + 1] * wp.transform_vector(X_wp, axis_1)
elif type == wp.sim.JOINT_D6:
# unroll for loop to ensure joint actions remain differentiable
# (since differentiating through a dynamic for loop that updates a local variable is not supported)
if lin_axis_count > 0:
if joint_axis_mode[axis_start + 0] == wp.sim.JOINT_MODE_FORCE:
axis = joint_axis[axis_start + 0]
act = joint_act[qd_start + 0]
a_p = wp.transform_vector(X_wp, axis)
f_total += act * a_p
if lin_axis_count > 1:
if joint_axis_mode[axis_start + 1] == wp.sim.JOINT_MODE_FORCE:
axis = joint_axis[axis_start + 1]
act = joint_act[qd_start + 1]
a_p = wp.transform_vector(X_wp, axis)
f_total += act * a_p
if lin_axis_count > 2:
if joint_axis_mode[axis_start + 2] == wp.sim.JOINT_MODE_FORCE:
axis = joint_axis[axis_start + 2]
act = joint_act[qd_start + 2]
a_p = wp.transform_vector(X_wp, axis)
f_total += act * a_p
if ang_axis_count > 0:
if joint_axis_mode[axis_start + lin_axis_count + 0] == wp.sim.JOINT_MODE_FORCE:
axis = joint_axis[axis_start + lin_axis_count + 0]
act = joint_act[qd_start + lin_axis_count + 0]
a_p = wp.transform_vector(X_wp, axis)
t_total += act * a_p
if ang_axis_count > 1:
if joint_axis_mode[axis_start + lin_axis_count + 1] == wp.sim.JOINT_MODE_FORCE:
axis = joint_axis[axis_start + lin_axis_count + 1]
act = joint_act[qd_start + lin_axis_count + 1]
a_p = wp.transform_vector(X_wp, axis)
t_total += act * a_p
if ang_axis_count > 2:
if joint_axis_mode[axis_start + lin_axis_count + 2] == wp.sim.JOINT_MODE_FORCE:
axis = joint_axis[axis_start + lin_axis_count + 2]
act = joint_act[qd_start + lin_axis_count + 2]
a_p = wp.transform_vector(X_wp, axis)
t_total += act * a_p
else:
print("joint type not handled in apply_joint_torques")
# write forces
if id_p >= 0:
wp.atomic_sub(body_f, id_p, wp.spatial_vector(t_total + wp.cross(r_p, f_total), f_total))
wp.atomic_add(body_f, id_c, wp.spatial_vector(t_total + wp.cross(r_c, f_total), f_total))
@wp.func
def update_joint_axis_mode(mode: wp.int32, axis: wp.vec3, input_axis_mode: wp.vec3i):
# update the 3D axis mode flags given the axis vector and mode of this axis
mode_x = wp.max(wp.int32(wp.nonzero(axis[0])) * mode, input_axis_mode[0])
mode_y = wp.max(wp.int32(wp.nonzero(axis[1])) * mode, input_axis_mode[1])
mode_z = wp.max(wp.int32(wp.nonzero(axis[2])) * mode, input_axis_mode[2])
return wp.vec3i(mode_x, mode_y, mode_z)
@wp.func
def update_joint_axis_limits(axis: wp.vec3, limit_lower: float, limit_upper: float, input_limits: wp.spatial_vector):
# update the 3D linear/angular limits (spatial_vector [lower, upper]) given the axis vector and limits
lo_temp = axis * limit_lower
up_temp = axis * limit_upper
lo = vec_min(lo_temp, up_temp)
up = vec_max(lo_temp, up_temp)
input_lower = wp.spatial_top(input_limits)
input_upper = wp.spatial_bottom(input_limits)
lower = vec_min(input_lower, lo)
upper = vec_max(input_upper, up)
return wp.spatial_vector(lower, upper)
@wp.func
def update_joint_axis_target_ke_kd(
axis: wp.vec3, target: float, target_ke: float, target_kd: float, input_target_ke_kd: wp.mat33
):
# update the 3D linear/angular target, target_ke, and target_kd (mat33 [target, ke, kd]) given the axis vector and target, target_ke, target_kd
axis_target = input_target_ke_kd[0]
axis_ke = input_target_ke_kd[1]
axis_kd = input_target_ke_kd[2]
stiffness = axis * target_ke
axis_target += stiffness * target # weighted target (to be normalized later by sum of target_ke)
axis_ke += vec_abs(stiffness)
axis_kd += vec_abs(axis * target_kd)
return wp.mat33(
axis_target[0],
axis_target[1],
axis_target[2],
axis_ke[0],
axis_ke[1],
axis_ke[2],
axis_kd[0],
axis_kd[1],
axis_kd[2],
)
@wp.func
def compute_linear_correction_3d(
dx: wp.vec3,
r1: wp.vec3,
r2: wp.vec3,
tf1: wp.transform,
tf2: wp.transform,
m_inv1: float,
m_inv2: float,
I_inv1: wp.mat33,
I_inv2: wp.mat33,
lambda_in: float,
compliance: float,
damping: float,
dt: float,
) -> float:
c = wp.length(dx)
if c == 0.0:
# print("c == 0.0 in positional correction")
return 0.0
n = wp.normalize(dx)
q1 = wp.transform_get_rotation(tf1)
q2 = wp.transform_get_rotation(tf2)
# Eq. 2-3 (make sure to project into the frame of the body)
r1xn = wp.quat_rotate_inv(q1, wp.cross(r1, n))
r2xn = wp.quat_rotate_inv(q2, wp.cross(r2, n))
w1 = m_inv1 + wp.dot(r1xn, I_inv1 * r1xn)
w2 = m_inv2 + wp.dot(r2xn, I_inv2 * r2xn)
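# w1, w2 are the generalized inverse masses of the two bodies along the correction direction n
# (translational part plus the rotational contribution of the moment arms r1, r2)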
w = w1 + w2
if w == 0.0:
return 0.0
alpha = compliance
gamma = compliance * damping
# Eq. 4-5
d_lambda = -c - alpha * lambda_in
# TODO consider damping for velocity correction?
# delta_lambda = -(err + alpha * lambda_in + gamma * derr)
if w + alpha > 0.0:
d_lambda /= w * (dt + gamma) + alpha / dt
return d_lambda
@wp.func
def compute_angular_correction_3d(
corr: wp.vec3,
q1: wp.quat,
q2: wp.quat,
m_inv1: float,
m_inv2: float,
I_inv1: wp.mat33,
I_inv2: wp.mat33,
alpha_tilde: float,
# lambda_prev: float,
relaxation: float,
dt: float,
):
# compute and apply the correction impulse for an angular constraint
theta = wp.length(corr)
if theta == 0.0:
return 0.0
n = wp.normalize(corr)
# project variables to body rest frame as they are in local matrix
n1 = wp.quat_rotate_inv(q1, n)
n2 = wp.quat_rotate_inv(q2, n)
# Eq. 11-12
w1 = wp.dot(n1, I_inv1 * n1)
w2 = wp.dot(n2, I_inv2 * n2)
w = w1 + w2
if w == 0.0:
return 0.0
# Eq. 13-14
lambda_prev = 0.0
d_lambda = (-theta - alpha_tilde * lambda_prev) / (w * dt + alpha_tilde / dt)
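# XPBD-style compliant update of the angular constraint; the caller applies relaxation and
# converts the returned multiplier into rotation deltas on both bodies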
# TODO consider lambda_prev?
# p = d_lambda * n * relaxation
# Eq. 15-16
return d_lambda
@wp.kernel
def solve_simple_body_joints(
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
body_inv_m: wp.array(dtype=float),
body_inv_I: wp.array(dtype=wp.mat33),
joint_type: wp.array(dtype=int),
joint_enabled: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
joint_limit_lower: wp.array(dtype=float),
joint_limit_upper: wp.array(dtype=float),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
joint_axis_mode: wp.array(dtype=int),
joint_axis: wp.array(dtype=wp.vec3),
joint_target: wp.array(dtype=float),
joint_target_ke: wp.array(dtype=float),
joint_target_kd: wp.array(dtype=float),
joint_linear_compliance: wp.array(dtype=float),
joint_angular_compliance: wp.array(dtype=float),
angular_relaxation: float,
linear_relaxation: float,
dt: float,
deltas: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
type = joint_type[tid]
if joint_enabled[tid] == 0:
return
if type == wp.sim.JOINT_FREE:
return
if type == wp.sim.JOINT_COMPOUND:
return
if type == wp.sim.JOINT_UNIVERSAL:
return
if type == wp.sim.JOINT_DISTANCE:
return
if type == wp.sim.JOINT_D6:
return
# rigid body indices of the child and parent
id_c = joint_child[tid]
id_p = joint_parent[tid]
X_pj = joint_X_p[tid]
X_cj = joint_X_c[tid]
X_wp = X_pj
m_inv_p = 0.0
I_inv_p = wp.mat33(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
pose_p = X_pj
com_p = wp.vec3(0.0)
# parent transform and moment arm
if id_p >= 0:
pose_p = body_q[id_p]
X_wp = pose_p * X_wp
com_p = body_com[id_p]
m_inv_p = body_inv_m[id_p]
I_inv_p = body_inv_I[id_p]
r_p = wp.transform_get_translation(X_wp) - wp.transform_point(pose_p, com_p)
# child transform and moment arm
pose_c = body_q[id_c]
X_wc = pose_c * X_cj
com_c = body_com[id_c]
m_inv_c = body_inv_m[id_c]
I_inv_c = body_inv_I[id_c]
r_c = wp.transform_get_translation(X_wc) - wp.transform_point(pose_c, com_c)
if m_inv_p == 0.0 and m_inv_c == 0.0:
# connection between two immovable bodies
return
# accumulate constraint deltas
lin_delta_p = wp.vec3(0.0)
ang_delta_p = wp.vec3(0.0)
lin_delta_c = wp.vec3(0.0)
ang_delta_c = wp.vec3(0.0)
# rel_pose = wp.transform_inverse(X_wp) * X_wc
# rel_p = wp.transform_get_translation(rel_pose)
# joint connection points
# x_p = wp.transform_get_translation(X_wp)
x_c = wp.transform_get_translation(X_wc)
# linear_compliance = joint_linear_compliance[tid]
angular_compliance = joint_angular_compliance[tid]
damping = 0.0
axis_start = joint_axis_start[tid]
# mode = joint_axis_mode[axis_start]
# local joint rotations
q_p = wp.transform_get_rotation(X_wp)
q_c = wp.transform_get_rotation(X_wc)
inertial_q_p = wp.transform_get_rotation(pose_p)
inertial_q_c = wp.transform_get_rotation(pose_c)
# joint properties (for 1D joints)
axis = joint_axis[axis_start]
if type == wp.sim.JOINT_FIXED:
limit_lower = 0.0
limit_upper = 0.0
else:
limit_lower = joint_limit_lower[axis_start]
limit_upper = joint_limit_upper[axis_start]
# linear_alpha_tilde = linear_compliance / dt / dt
angular_alpha_tilde = angular_compliance / dt / dt
# prevent division by zero
# linear_alpha_tilde = wp.max(linear_alpha_tilde, 1e-6)
# angular_alpha_tilde = wp.max(angular_alpha_tilde, 1e-6)
# accumulate constraint deltas
lin_delta_p = wp.vec3(0.0)
ang_delta_p = wp.vec3(0.0)
lin_delta_c = wp.vec3(0.0)
ang_delta_c = wp.vec3(0.0)
# handle angular constraints
if type == wp.sim.JOINT_REVOLUTE:
# align joint axes
a_p = wp.quat_rotate(q_p, axis)
a_c = wp.quat_rotate(q_c, axis)
# Eq. 20
corr = wp.cross(a_p, a_c)
ncorr = wp.normalize(corr)
angular_relaxation = 0.2
# angular_correction(
# corr, inertial_q_p, inertial_q_c, m_inv_p, m_inv_c, I_inv_p, I_inv_c,
# angular_alpha_tilde, angular_relaxation, deltas, id_p, id_c)
lambda_n = compute_angular_correction_3d(
corr, inertial_q_p, inertial_q_c, m_inv_p, m_inv_c, I_inv_p, I_inv_c, angular_alpha_tilde, damping, dt
)
lambda_n *= angular_relaxation
ang_delta_p -= lambda_n * ncorr
ang_delta_c += lambda_n * ncorr
# limit joint angles (Alg. 3)
pi = 3.14159265359
two_pi = 2.0 * pi
if limit_lower > -two_pi or limit_upper < two_pi:
# find a perpendicular vector to joint axis
a = axis
# https://math.stackexchange.com/a/3582461
g = wp.sign(a[2])
h = a[2] + g
b = wp.vec3(g - a[0] * a[0] / h, -a[0] * a[1] / h, -a[0])
c = wp.normalize(wp.cross(a, b))
# b = c # TODO verify
# joint axis
n = wp.quat_rotate(q_p, a)
# the axes n1 and n2 are aligned with the two bodies
n1 = wp.quat_rotate(q_p, b)
n2 = wp.quat_rotate(q_c, b)
phi = wp.asin(wp.dot(wp.cross(n1, n2), n))
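# phi is the signed joint angle from n1 to n2 measured about the joint axis n; it is
# wrapped to [-pi, pi] below and then clamped against the joint limits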
# print("phi")
# print(phi)
if wp.dot(n1, n2) < 0.0:
phi = pi - phi
if phi > pi:
phi -= two_pi
if phi < -pi:
phi += two_pi
if phi < limit_lower or phi > limit_upper:
phi = wp.clamp(phi, limit_lower, limit_upper)
# print("clamped phi")
# print(phi)
# rot = wp.quat(phi, n[0], n[1], n[2])
# rot = wp.quat(n, phi)
rot = wp.quat_from_axis_angle(n, phi)
n1 = wp.quat_rotate(rot, n1)
corr = wp.cross(n1, n2)
# print("corr")
# print(corr)
# TODO expose
# angular_alpha_tilde = 0.0001 / dt / dt
# angular_relaxation = 0.5
# TODO fix this constraint
# angular_correction(
# corr, inertial_q_p, inertial_q_c, m_inv_p, m_inv_c, I_inv_p, I_inv_c,
# angular_alpha_tilde, angular_relaxation, deltas, id_p, id_c)
lambda_n = compute_angular_correction_3d(
corr,
inertial_q_p,
inertial_q_c,
m_inv_p,
m_inv_c,
I_inv_p,
I_inv_c,
angular_alpha_tilde,
damping,
dt,
)
lambda_n *= angular_relaxation
ncorr = wp.normalize(corr)
ang_delta_p -= lambda_n * ncorr
ang_delta_c += lambda_n * ncorr
# handle joint targets
target_ke = joint_target_ke[axis_start]
# target_kd = joint_target_kd[axis_start]
target = joint_target[axis_start]
if target_ke > 0.0:
# find a perpendicular vector to joint axis
a = axis
# https://math.stackexchange.com/a/3582461
g = wp.sign(a[2])
h = a[2] + g
b = wp.vec3(g - a[0] * a[0] / h, -a[0] * a[1] / h, -a[0])
c = wp.normalize(wp.cross(a, b))
b = c
q = wp.quat_from_axis_angle(a_p, target)
b_target = wp.quat_rotate(q, wp.quat_rotate(q_p, b))
b2 = wp.quat_rotate(q_c, b)
# Eq. 21
d_target = wp.cross(b_target, b2)
target_compliance = 1.0 / target_ke # / dt / dt
# angular_correction(
# d_target, inertial_q_p, inertial_q_c, m_inv_p, m_inv_c, I_inv_p, I_inv_c,
# target_compliance, angular_relaxation, deltas, id_p, id_c)
lambda_n = compute_angular_correction_3d(
d_target, inertial_q_p, inertial_q_c, m_inv_p, m_inv_c, I_inv_p, I_inv_c, target_compliance, damping, dt
)
lambda_n *= angular_relaxation
ncorr = wp.normalize(d_target)
# TODO fix
ang_delta_p -= lambda_n * ncorr
ang_delta_c += lambda_n * ncorr
if (type == wp.sim.JOINT_FIXED) or (type == wp.sim.JOINT_PRISMATIC):
# align the mutual orientations of the two bodies
# Eq. 18-19
q = q_p * wp.quat_inverse(q_c)
corr = -2.0 * wp.vec3(q[0], q[1], q[2])
# angular_correction(
# -corr, inertial_q_p, inertial_q_c, m_inv_p, m_inv_c, I_inv_p, I_inv_c,
# angular_alpha_tilde, angular_relaxation, deltas, id_p, id_c)
lambda_n = compute_angular_correction_3d(
corr, inertial_q_p, inertial_q_c, m_inv_p, m_inv_c, I_inv_p, I_inv_c, angular_alpha_tilde, damping, dt
)
lambda_n *= angular_relaxation
ncorr = wp.normalize(corr)
ang_delta_p -= lambda_n * ncorr
ang_delta_c += lambda_n * ncorr
# handle positional constraints
# joint connection points
x_p = wp.transform_get_translation(X_wp)
x_c = wp.transform_get_translation(X_wc)
# compute error between the joint attachment points on both bodies
# delta x is the difference of point r_2 minus point r_1 (Fig. 3)
dx = x_c - x_p
# rotate the error vector into the joint frame
q_dx = q_p
# q_dx = q_c
# q_dx = wp.transform_get_rotation(pose_p)
dx = wp.quat_rotate_inv(q_dx, dx)
lower_pos_limits = wp.vec3(0.0)
upper_pos_limits = wp.vec3(0.0)
if type == wp.sim.JOINT_PRISMATIC:
lower_pos_limits = axis * limit_lower
upper_pos_limits = axis * limit_upper
# compute linear constraint violations
corr = wp.vec3(0.0)
zero = wp.vec3(0.0)
corr -= vec_leaky_min(zero, upper_pos_limits - dx)
corr -= vec_leaky_max(zero, lower_pos_limits - dx)
# if (type == wp.sim.JOINT_PRISMATIC):
# if mode == JOINT_MODE_TARGET_POSITION:
# target = wp.clamp(target, limit_lower, limit_upper)
# if target_ke > 0.0:
# err = dx - target * axis
# compliance = 1.0 / target_ke
# damping = axis_damping[dim]
# elif mode == JOINT_MODE_TARGET_VELOCITY:
# if target_ke > 0.0:
# err = (derr - target) * dt
# compliance = 1.0 / target_ke
# damping = axis_damping[dim]
# rotate correction vector into world frame
corr = wp.quat_rotate(q_dx, corr)
lambda_in = 0.0
linear_alpha = joint_linear_compliance[tid]
lambda_n = compute_linear_correction_3d(
corr, r_p, r_c, pose_p, pose_c, m_inv_p, m_inv_c, I_inv_p, I_inv_c, lambda_in, linear_alpha, damping, dt
)
lambda_n *= linear_relaxation
n = wp.normalize(corr)
lin_delta_p -= n * lambda_n
lin_delta_c += n * lambda_n
ang_delta_p -= wp.cross(r_p, n) * lambda_n
ang_delta_c += wp.cross(r_c, n) * lambda_n
if id_p >= 0:
wp.atomic_add(deltas, id_p, wp.spatial_vector(ang_delta_p, lin_delta_p))
if id_c >= 0:
wp.atomic_add(deltas, id_c, wp.spatial_vector(ang_delta_c, lin_delta_c))
@wp.kernel
def solve_body_joints(
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
body_inv_m: wp.array(dtype=float),
body_inv_I: wp.array(dtype=wp.mat33),
joint_type: wp.array(dtype=int),
joint_enabled: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
joint_limit_lower: wp.array(dtype=float),
joint_limit_upper: wp.array(dtype=float),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
joint_axis_mode: wp.array(dtype=int),
joint_axis: wp.array(dtype=wp.vec3),
joint_act: wp.array(dtype=float),
joint_target_ke: wp.array(dtype=float),
joint_target_kd: wp.array(dtype=float),
joint_linear_compliance: wp.array(dtype=float),
joint_angular_compliance: wp.array(dtype=float),
angular_relaxation: float,
linear_relaxation: float,
dt: float,
deltas: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
type = joint_type[tid]
if joint_enabled[tid] == 0:
return
if type == wp.sim.JOINT_FREE:
return
# if type == wp.sim.JOINT_FIXED:
# return
# if type == wp.sim.JOINT_REVOLUTE:
# return
# if type == wp.sim.JOINT_PRISMATIC:
# return
# if type == wp.sim.JOINT_BALL:
# return
# rigid body indices of the child and parent
id_c = joint_child[tid]
id_p = joint_parent[tid]
X_pj = joint_X_p[tid]
X_cj = joint_X_c[tid]
X_wp = X_pj
m_inv_p = 0.0
I_inv_p = wp.mat33(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
pose_p = X_pj
com_p = wp.vec3(0.0)
vel_p = wp.vec3(0.0)
omega_p = wp.vec3(0.0)
# parent transform and moment arm
if id_p >= 0:
pose_p = body_q[id_p]
X_wp = pose_p * X_wp
com_p = body_com[id_p]
m_inv_p = body_inv_m[id_p]
I_inv_p = body_inv_I[id_p]
vel_p = wp.spatial_bottom(body_qd[id_p])
omega_p = wp.spatial_top(body_qd[id_p])
# child transform and moment arm
pose_c = body_q[id_c]
X_wc = pose_c * X_cj
com_c = body_com[id_c]
m_inv_c = body_inv_m[id_c]
I_inv_c = body_inv_I[id_c]
vel_c = wp.spatial_bottom(body_qd[id_c])
omega_c = wp.spatial_top(body_qd[id_c])
if m_inv_p == 0.0 and m_inv_c == 0.0:
# connection between two immovable bodies
return
# accumulate constraint deltas
lin_delta_p = wp.vec3(0.0)
ang_delta_p = wp.vec3(0.0)
lin_delta_c = wp.vec3(0.0)
ang_delta_c = wp.vec3(0.0)
rel_pose = wp.transform_inverse(X_wp) * X_wc
rel_p = wp.transform_get_translation(rel_pose)
# joint connection points
# x_p = wp.transform_get_translation(X_wp)
x_c = wp.transform_get_translation(X_wc)
linear_compliance = joint_linear_compliance[tid]
angular_compliance = joint_angular_compliance[tid]
axis_start = joint_axis_start[tid]
lin_axis_count = joint_axis_dim[tid, 0]
ang_axis_count = joint_axis_dim[tid, 1]
world_com_p = wp.transform_point(pose_p, com_p)
world_com_c = wp.transform_point(pose_c, com_c)
# handle positional constraints
if type == wp.sim.JOINT_DISTANCE:
r_p = wp.transform_get_translation(X_wp) - world_com_p
r_c = wp.transform_get_translation(X_wc) - world_com_c
lower = joint_limit_lower[axis_start]
upper = joint_limit_upper[axis_start]
if lower < 0.0 and upper < 0.0:
# no limits
return
d = wp.length(rel_p)
err = 0.0
if lower >= 0.0 and d < lower:
err = d - lower
# use a more descriptive direction vector for the constraint
# in case the joint parent and child anchors are very close
rel_p = err * wp.normalize(world_com_c - world_com_p)
elif upper >= 0.0 and d > upper:
err = d - upper
if wp.abs(err) > 1e-9:
# compute gradients
linear_c = rel_p
linear_p = -linear_c
r_c = x_c - world_com_c
angular_p = -wp.cross(r_p, linear_c)
angular_c = wp.cross(r_c, linear_c)
# constraint time derivative
derr = (
wp.dot(linear_p, vel_p)
+ wp.dot(linear_c, vel_c)
+ wp.dot(angular_p, omega_p)
+ wp.dot(angular_c, omega_c)
)
lambda_in = 0.0
compliance = linear_compliance
ke = joint_target_ke[axis_start]
if ke > 0.0:
compliance = 1.0 / ke
damping = joint_target_kd[axis_start]
d_lambda = compute_positional_correction(
err,
derr,
pose_p,
pose_c,
m_inv_p,
m_inv_c,
I_inv_p,
I_inv_c,
linear_p,
linear_c,
angular_p,
angular_c,
lambda_in,
compliance,
damping,
dt,
)
lin_delta_p += linear_p * (d_lambda * linear_relaxation)
ang_delta_p += angular_p * (d_lambda * angular_relaxation)
lin_delta_c += linear_c * (d_lambda * linear_relaxation)
ang_delta_c += angular_c * (d_lambda * angular_relaxation)
else:
# compute joint target, stiffness, damping
ke_sum = float(0.0)
axis_limits = wp.spatial_vector(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
axis_mode = wp.vec3i(0, 0, 0)
axis_target_ke_kd = wp.mat33(0.0)
# avoid a for loop here since local variables would need to be modified which is not yet differentiable
if lin_axis_count > 0:
axis = joint_axis[axis_start]
lo_temp = axis * joint_limit_lower[axis_start]
up_temp = axis * joint_limit_upper[axis_start]
axis_limits = wp.spatial_vector(vec_min(lo_temp, up_temp), vec_max(lo_temp, up_temp))
mode = joint_axis_mode[axis_start]
if mode != JOINT_MODE_FORCE: # position or velocity target
ke = joint_target_ke[axis_start]
kd = joint_target_kd[axis_start]
target = joint_act[axis_start]
axis_mode = update_joint_axis_mode(mode, axis, axis_mode)
axis_target_ke_kd = update_joint_axis_target_ke_kd(axis, target, ke, kd, axis_target_ke_kd)
ke_sum += ke
if lin_axis_count > 1:
axis_idx = axis_start + 1
axis = joint_axis[axis_idx]
lower = joint_limit_lower[axis_idx]
upper = joint_limit_upper[axis_idx]
axis_limits = update_joint_axis_limits(axis, lower, upper, axis_limits)
mode = joint_axis_mode[axis_idx]
if mode != JOINT_MODE_FORCE: # position or velocity target
ke = joint_target_ke[axis_idx]
kd = joint_target_kd[axis_idx]
target = joint_act[axis_idx]
axis_mode = update_joint_axis_mode(mode, axis, axis_mode)
axis_target_ke_kd = update_joint_axis_target_ke_kd(axis, target, ke, kd, axis_target_ke_kd)
ke_sum += ke
if lin_axis_count > 2:
axis_idx = axis_start + 2
axis = joint_axis[axis_idx]
lower = joint_limit_lower[axis_idx]
upper = joint_limit_upper[axis_idx]
axis_limits = update_joint_axis_limits(axis, lower, upper, axis_limits)
mode = joint_axis_mode[axis_idx]
if mode != JOINT_MODE_FORCE: # position or velocity target
ke = joint_target_ke[axis_idx]
kd = joint_target_kd[axis_idx]
target = joint_act[axis_idx]
axis_mode = update_joint_axis_mode(mode, axis, axis_mode)
axis_target_ke_kd = update_joint_axis_target_ke_kd(axis, target, ke, kd, axis_target_ke_kd)
ke_sum += ke
axis_target = axis_target_ke_kd[0]
axis_stiffness = axis_target_ke_kd[1]
axis_damping = axis_target_ke_kd[2]
if ke_sum > 0.0:
axis_target /= ke_sum
axis_limits_lower = wp.spatial_top(axis_limits)
axis_limits_upper = wp.spatial_bottom(axis_limits)
frame_p = wp.quat_to_matrix(wp.transform_get_rotation(X_wp))
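# the columns of frame_p are the parent joint frame axes expressed in world space; rel_p holds
# the child anchor position in that frame, so each dimension is handled as an independent
# linear constraint with its own limits/target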
# note that x_c appearing in both is correct
r_p = x_c - world_com_p
r_c = x_c - wp.transform_point(pose_c, com_c)
# for loop will be unrolled, so we can modify local variables
for dim in range(3):
e = rel_p[dim]
mode = axis_mode[dim]
# compute gradients
linear_c = wp.vec3(frame_p[0, dim], frame_p[1, dim], frame_p[2, dim])
linear_p = -linear_c
angular_p = -wp.cross(r_p, linear_c)
angular_c = wp.cross(r_c, linear_c)
# constraint time derivative
derr = (
wp.dot(linear_p, vel_p)
+ wp.dot(linear_c, vel_c)
+ wp.dot(angular_p, omega_p)
+ wp.dot(angular_c, omega_c)
)
err = 0.0
compliance = linear_compliance
damping = 0.0
# consider joint limits irrespective of axis mode
lower = axis_limits_lower[dim]
upper = axis_limits_upper[dim]
if e < lower:
err = e - lower
elif e > upper:
err = e - upper
else:
target = axis_target[dim]
if mode == JOINT_MODE_TARGET_POSITION:
target = wp.clamp(target, lower, upper)
if axis_stiffness[dim] > 0.0:
err = e - target
compliance = 1.0 / axis_stiffness[dim]
damping = axis_damping[dim]
elif mode == JOINT_MODE_TARGET_VELOCITY:
if axis_stiffness[dim] > 0.0:
err = (derr - target) * dt
compliance = 1.0 / axis_stiffness[dim]
damping = axis_damping[dim]
derr = 0.0
if wp.abs(err) > 1e-9:
lambda_in = 0.0
d_lambda = compute_positional_correction(
err,
derr,
pose_p,
pose_c,
m_inv_p,
m_inv_c,
I_inv_p,
I_inv_c,
linear_p,
linear_c,
angular_p,
angular_c,
lambda_in,
compliance,
damping,
dt,
)
lin_delta_p += linear_p * (d_lambda * linear_relaxation)
ang_delta_p += angular_p * (d_lambda * angular_relaxation)
lin_delta_c += linear_c * (d_lambda * linear_relaxation)
ang_delta_c += angular_c * (d_lambda * angular_relaxation)
if (
type == wp.sim.JOINT_FIXED
or type == wp.sim.JOINT_PRISMATIC
or type == wp.sim.JOINT_REVOLUTE
or type == wp.sim.JOINT_UNIVERSAL
or type == wp.sim.JOINT_COMPOUND
or type == wp.sim.JOINT_D6
):
# handle angular constraints
# local joint rotations
q_p = wp.transform_get_rotation(X_wp)
q_c = wp.transform_get_rotation(X_wc)
# make quats lie in same hemisphere
if wp.dot(q_p, q_c) < 0.0:
q_c *= -1.0
rel_q = wp.quat_inverse(q_p) * q_c
qtwist = wp.normalize(wp.quat(rel_q[0], 0.0, 0.0, rel_q[3]))
qswing = rel_q * wp.quat_inverse(qtwist)
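        # swing-twist decomposition: rel_q = qswing * qtwist, where qtwist is the
        # rotation about the joint X axis and qswing (which has no X component)
        # tilts the joint X axis itself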
        # decompose into a compound rotation about each axis
s = wp.sqrt(rel_q[0] * rel_q[0] + rel_q[3] * rel_q[3])
invs = 1.0 / s
invscube = invs * invs * invs
# handle axis-angle joints
        # rescale the twist component from quaternion space to an angle
err_0 = 2.0 * wp.asin(wp.clamp(qtwist[0], -1.0, 1.0))
err_1 = qswing[1]
err_2 = qswing[2]
# analytic gradients of swing-twist decomposition
grad_0 = wp.quat(invs - rel_q[0] * rel_q[0] * invscube, 0.0, 0.0, -(rel_q[3] * rel_q[0]) * invscube)
grad_1 = wp.quat(
-rel_q[3] * (rel_q[3] * rel_q[2] + rel_q[0] * rel_q[1]) * invscube,
rel_q[3] * invs,
-rel_q[0] * invs,
rel_q[0] * (rel_q[3] * rel_q[2] + rel_q[0] * rel_q[1]) * invscube,
)
grad_2 = wp.quat(
rel_q[3] * (rel_q[3] * rel_q[1] - rel_q[0] * rel_q[2]) * invscube,
rel_q[0] * invs,
rel_q[3] * invs,
rel_q[0] * (rel_q[2] * rel_q[0] - rel_q[3] * rel_q[1]) * invscube,
)
grad_0 *= 2.0 / wp.abs(qtwist[3])
# grad_0 *= 2.0 / wp.sqrt(1.0-qtwist[0]*qtwist[0]) # derivative of asin(x) = 1/sqrt(1-x^2)
# rescale swing
swing_sq = qswing[3] * qswing[3]
        # if the swing axis magnitude is close to zero (quaternion near identity), just treat the error in quaternion space
angularEps = 1.0e-4
if swing_sq + angularEps < 1.0:
d = wp.sqrt(1.0 - qswing[3] * qswing[3])
theta = 2.0 * wp.acos(wp.clamp(qswing[3], -1.0, 1.0))
scale = theta / d
err_1 *= scale
err_2 *= scale
grad_1 *= scale
grad_2 *= scale
errs = wp.vec3(err_0, err_1, err_2)
grad_x = wp.vec3(grad_0[0], grad_1[0], grad_2[0])
grad_y = wp.vec3(grad_0[1], grad_1[1], grad_2[1])
grad_z = wp.vec3(grad_0[2], grad_1[2], grad_2[2])
grad_w = wp.vec3(grad_0[3], grad_1[3], grad_2[3])
# compute joint target, stiffness, damping
ke_sum = float(0.0)
axis_limits = wp.spatial_vector(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
axis_mode = wp.vec3i(0, 0, 0)
axis_target_ke_kd = wp.mat33(0.0)
        # avoid a for loop here since local variables would need to be modified, which is not yet differentiable
if ang_axis_count > 0:
axis_idx = axis_start + lin_axis_count
axis = joint_axis[axis_idx]
lo_temp = axis * joint_limit_lower[axis_idx]
up_temp = axis * joint_limit_upper[axis_idx]
axis_limits = wp.spatial_vector(vec_min(lo_temp, up_temp), vec_max(lo_temp, up_temp))
mode = joint_axis_mode[axis_idx]
if mode != JOINT_MODE_FORCE: # position or velocity target
ke = joint_target_ke[axis_idx]
kd = joint_target_kd[axis_idx]
target = joint_act[axis_idx]
axis_mode = update_joint_axis_mode(mode, axis, axis_mode)
axis_target_ke_kd = update_joint_axis_target_ke_kd(axis, target, ke, kd, axis_target_ke_kd)
ke_sum += ke
if ang_axis_count > 1:
axis_idx = axis_start + lin_axis_count + 1
axis = joint_axis[axis_idx]
lower = joint_limit_lower[axis_idx]
upper = joint_limit_upper[axis_idx]
axis_limits = update_joint_axis_limits(axis, lower, upper, axis_limits)
mode = joint_axis_mode[axis_idx]
if mode != JOINT_MODE_FORCE: # position or velocity target
ke = joint_target_ke[axis_idx]
kd = joint_target_kd[axis_idx]
target = joint_act[axis_idx]
axis_mode = update_joint_axis_mode(mode, axis, axis_mode)
axis_target_ke_kd = update_joint_axis_target_ke_kd(axis, target, ke, kd, axis_target_ke_kd)
ke_sum += ke
if ang_axis_count > 2:
axis_idx = axis_start + lin_axis_count + 2
axis = joint_axis[axis_idx]
lower = joint_limit_lower[axis_idx]
upper = joint_limit_upper[axis_idx]
axis_limits = update_joint_axis_limits(axis, lower, upper, axis_limits)
mode = joint_axis_mode[axis_idx]
if mode != JOINT_MODE_FORCE: # position or velocity target
ke = joint_target_ke[axis_idx]
kd = joint_target_kd[axis_idx]
target = joint_act[axis_idx]
axis_mode = update_joint_axis_mode(mode, axis, axis_mode)
axis_target_ke_kd = update_joint_axis_target_ke_kd(axis, target, ke, kd, axis_target_ke_kd)
ke_sum += ke
axis_target = axis_target_ke_kd[0]
axis_stiffness = axis_target_ke_kd[1]
axis_damping = axis_target_ke_kd[2]
if ke_sum > 0.0:
axis_target /= ke_sum
axis_limits_lower = wp.spatial_top(axis_limits)
axis_limits_upper = wp.spatial_bottom(axis_limits)
# if type == wp.sim.JOINT_D6:
# wp.printf("axis_target: %f %f %f\t axis_stiffness: %f %f %f\t axis_damping: %f %f %f\t axis_limits_lower: %f %f %f \t axis_limits_upper: %f %f %f\n",
# axis_target[0], axis_target[1], axis_target[2],
# axis_stiffness[0], axis_stiffness[1], axis_stiffness[2],
# axis_damping[0], axis_damping[1], axis_damping[2],
# axis_limits_lower[0], axis_limits_lower[1], axis_limits_lower[2],
# axis_limits_upper[0], axis_limits_upper[1], axis_limits_upper[2])
# # wp.printf("wp.sqrt(1.0-qtwist[0]*qtwist[0]) = %f\n", wp.sqrt(1.0-qtwist[0]*qtwist[0]))
for dim in range(3):
e = errs[dim]
mode = axis_mode[dim]
# analytic gradients of swing-twist decomposition
grad = wp.quat(grad_x[dim], grad_y[dim], grad_z[dim], grad_w[dim])
quat_c = 0.5 * q_p * grad * wp.quat_inverse(q_c)
angular_c = wp.vec3(quat_c[0], quat_c[1], quat_c[2])
angular_p = -angular_c
# time derivative of the constraint
derr = wp.dot(angular_p, omega_p) + wp.dot(angular_c, omega_c)
err = 0.0
compliance = angular_compliance
damping = 0.0
# consider joint limits irrespective of mode
lower = axis_limits_lower[dim]
upper = axis_limits_upper[dim]
if e < lower:
err = e - lower
elif e > upper:
err = e - upper
else:
target = axis_target[dim]
if mode == JOINT_MODE_TARGET_POSITION:
target = wp.clamp(target, lower, upper)
if axis_stiffness[dim] > 0.0:
err = e - target
compliance = 1.0 / axis_stiffness[dim]
damping = axis_damping[dim]
elif mode == JOINT_MODE_TARGET_VELOCITY:
if axis_stiffness[dim] > 0.0:
err = (derr - target) * dt
compliance = 1.0 / axis_stiffness[dim]
damping = axis_damping[dim]
derr = 0.0
d_lambda = (
compute_angular_correction(
err, derr, pose_p, pose_c, I_inv_p, I_inv_c, angular_p, angular_c, 0.0, compliance, damping, dt
)
* angular_relaxation
)
# update deltas
ang_delta_p += angular_p * d_lambda
ang_delta_c += angular_c * d_lambda
if id_p >= 0:
wp.atomic_add(deltas, id_p, wp.spatial_vector(ang_delta_p, lin_delta_p))
if id_c >= 0:
wp.atomic_add(deltas, id_c, wp.spatial_vector(ang_delta_c, lin_delta_c))
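# Computes a relaxed impulse magnitude for a rigid contact constraint: the positional
# error is divided by dt times the generalized inverse mass
#   w = |J_lin_a|^2 * m_inv_a + |J_lin_b|^2 * m_inv_b
#     + J_ang_a . (I_inv_a * J_ang_a) + J_ang_b . (I_inv_b * J_ang_b)
# with the angular Jacobians rotated into each body's local frame before applying the
# inverse inertia.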
@wp.func
def compute_contact_constraint_delta(
err: float,
tf_a: wp.transform,
tf_b: wp.transform,
m_inv_a: float,
m_inv_b: float,
I_inv_a: wp.mat33,
I_inv_b: wp.mat33,
linear_a: wp.vec3,
linear_b: wp.vec3,
angular_a: wp.vec3,
angular_b: wp.vec3,
relaxation: float,
dt: float,
) -> float:
denom = 0.0
denom += wp.length_sq(linear_a) * m_inv_a
denom += wp.length_sq(linear_b) * m_inv_b
q1 = wp.transform_get_rotation(tf_a)
q2 = wp.transform_get_rotation(tf_b)
# Eq. 2-3 (make sure to project into the frame of the body)
rot_angular_a = wp.quat_rotate_inv(q1, angular_a)
rot_angular_b = wp.quat_rotate_inv(q2, angular_b)
denom += wp.dot(rot_angular_a, I_inv_a * rot_angular_a)
denom += wp.dot(rot_angular_b, I_inv_b * rot_angular_b)
delta_lambda = -err
if denom > 0.0:
delta_lambda /= dt * denom
return delta_lambda * relaxation
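# XPBD positional correction with compliance and damping (cf. Macklin et al. 2016):
#   delta_lambda = -(C + alpha * lambda + gamma * dC/dt) / ((dt + gamma) * w + alpha / dt)
# where C = err, dC/dt = derr, alpha = compliance, gamma = compliance * damping, and
# w is the same generalized inverse mass as in compute_contact_constraint_delta.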
@wp.func
def compute_positional_correction(
err: float,
derr: float,
tf_a: wp.transform,
tf_b: wp.transform,
m_inv_a: float,
m_inv_b: float,
I_inv_a: wp.mat33,
I_inv_b: wp.mat33,
linear_a: wp.vec3,
linear_b: wp.vec3,
angular_a: wp.vec3,
angular_b: wp.vec3,
lambda_in: float,
compliance: float,
damping: float,
dt: float,
) -> float:
denom = 0.0
denom += wp.length_sq(linear_a) * m_inv_a
denom += wp.length_sq(linear_b) * m_inv_b
q1 = wp.transform_get_rotation(tf_a)
q2 = wp.transform_get_rotation(tf_b)
# Eq. 2-3 (make sure to project into the frame of the body)
rot_angular_a = wp.quat_rotate_inv(q1, angular_a)
rot_angular_b = wp.quat_rotate_inv(q2, angular_b)
denom += wp.dot(rot_angular_a, I_inv_a * rot_angular_a)
denom += wp.dot(rot_angular_b, I_inv_b * rot_angular_b)
alpha = compliance
gamma = compliance * damping
delta_lambda = -(err + alpha * lambda_in + gamma * derr)
if denom + alpha > 0.0:
delta_lambda /= (dt + gamma) * denom + alpha / dt
return delta_lambda
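# Angular-only variant of compute_positional_correction: the constraint Jacobian has no
# linear part, so only the rotational terms contribute to the generalized inverse mass.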
@wp.func
def compute_angular_correction(
err: float,
derr: float,
tf_a: wp.transform,
tf_b: wp.transform,
I_inv_a: wp.mat33,
I_inv_b: wp.mat33,
angular_a: wp.vec3,
angular_b: wp.vec3,
lambda_in: float,
compliance: float,
damping: float,
dt: float,
) -> float:
denom = 0.0
q1 = wp.transform_get_rotation(tf_a)
q2 = wp.transform_get_rotation(tf_b)
# Eq. 2-3 (make sure to project into the frame of the body)
rot_angular_a = wp.quat_rotate_inv(q1, angular_a)
rot_angular_b = wp.quat_rotate_inv(q2, angular_b)
denom += wp.dot(rot_angular_a, I_inv_a * rot_angular_a)
denom += wp.dot(rot_angular_b, I_inv_b * rot_angular_b)
alpha = compliance
gamma = compliance * damping
delta_lambda = -(err + alpha * lambda_in + gamma * derr)
if denom + alpha > 0.0:
delta_lambda /= (dt + gamma) * denom + alpha / dt
return delta_lambda
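# Positional rigid contact solver: resolves interpenetration along the contact normal,
# applies positional friction limited by mu times the incremental normal impulse, and
# adds optional torsional and rolling friction based on the relative angular velocity
# about and perpendicular to the contact normal.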
@wp.kernel
def solve_body_contact_positions(
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
body_m_inv: wp.array(dtype=float),
body_I_inv: wp.array(dtype=wp.mat33),
shape_body: wp.array(dtype=int),
contact_count: wp.array(dtype=int),
contact_point0: wp.array(dtype=wp.vec3),
contact_point1: wp.array(dtype=wp.vec3),
contact_offset0: wp.array(dtype=wp.vec3),
contact_offset1: wp.array(dtype=wp.vec3),
contact_normal: wp.array(dtype=wp.vec3),
contact_thickness: wp.array(dtype=float),
contact_shape0: wp.array(dtype=int),
contact_shape1: wp.array(dtype=int),
shape_materials: ModelShapeMaterials,
relaxation: float,
dt: float,
contact_torsional_friction: float,
contact_rolling_friction: float,
# outputs
deltas: wp.array(dtype=wp.spatial_vector),
contact_inv_weight: wp.array(dtype=float),
):
tid = wp.tid()
count = contact_count[0]
if tid >= count:
return
shape_a = contact_shape0[tid]
shape_b = contact_shape1[tid]
if shape_a == shape_b:
return
body_a = -1
if shape_a >= 0:
body_a = shape_body[shape_a]
body_b = -1
if shape_b >= 0:
body_b = shape_body[shape_b]
if body_a == body_b:
return
# find body to world transform
X_wb_a = wp.transform_identity()
X_wb_b = wp.transform_identity()
if body_a >= 0:
X_wb_a = body_q[body_a]
if body_b >= 0:
X_wb_b = body_q[body_b]
# compute body position in world space
bx_a = wp.transform_point(X_wb_a, contact_point0[tid])
bx_b = wp.transform_point(X_wb_b, contact_point1[tid])
thickness = contact_thickness[tid]
n = -contact_normal[tid]
d = wp.dot(n, bx_b - bx_a) - thickness
if d >= 0.0:
return
m_inv_a = 0.0
m_inv_b = 0.0
I_inv_a = wp.mat33(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
I_inv_b = wp.mat33(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
# center of mass in body frame
com_a = wp.vec3(0.0)
com_b = wp.vec3(0.0)
# body to world transform
X_wb_a = wp.transform_identity()
X_wb_b = wp.transform_identity()
# angular velocities
omega_a = wp.vec3(0.0)
omega_b = wp.vec3(0.0)
# contact offset in body frame
offset_a = contact_offset0[tid]
offset_b = contact_offset1[tid]
if body_a >= 0:
X_wb_a = body_q[body_a]
com_a = body_com[body_a]
m_inv_a = body_m_inv[body_a]
I_inv_a = body_I_inv[body_a]
omega_a = wp.spatial_top(body_qd[body_a])
if body_b >= 0:
X_wb_b = body_q[body_b]
com_b = body_com[body_b]
m_inv_b = body_m_inv[body_b]
I_inv_b = body_I_inv[body_b]
omega_b = wp.spatial_top(body_qd[body_b])
# use average contact material properties
mat_nonzero = 0
mu = 0.0
if shape_a >= 0:
mat_nonzero += 1
mu += shape_materials.mu[shape_a]
if shape_b >= 0:
mat_nonzero += 1
mu += shape_materials.mu[shape_b]
if mat_nonzero > 0:
mu /= float(mat_nonzero)
r_a = bx_a - wp.transform_point(X_wb_a, com_a)
r_b = bx_b - wp.transform_point(X_wb_b, com_b)
angular_a = -wp.cross(r_a, n)
angular_b = wp.cross(r_b, n)
if contact_inv_weight:
if body_a >= 0:
wp.atomic_add(contact_inv_weight, body_a, 1.0)
if body_b >= 0:
wp.atomic_add(contact_inv_weight, body_b, 1.0)
lambda_n = compute_contact_constraint_delta(
d, X_wb_a, X_wb_b, m_inv_a, m_inv_b, I_inv_a, I_inv_b, -n, n, angular_a, angular_b, relaxation, dt
)
lin_delta_a = -n * lambda_n
lin_delta_b = n * lambda_n
ang_delta_a = angular_a * lambda_n
ang_delta_b = angular_b * lambda_n
# linear friction
if mu > 0.0:
        # add on displacement from the surface offsets; this ensures we include any rotational effects due to the contact thickness
        # need to use the current rotation to account for friction due to angular effects (e.g. a slipping contact)
bx_a += wp.transform_vector(X_wb_a, offset_a)
bx_b += wp.transform_vector(X_wb_b, offset_b)
# update delta
delta = bx_b - bx_a
friction_delta = delta - wp.dot(n, delta) * n
perp = wp.normalize(friction_delta)
r_a = bx_a - wp.transform_point(X_wb_a, com_a)
r_b = bx_b - wp.transform_point(X_wb_b, com_b)
angular_a = -wp.cross(r_a, perp)
angular_b = wp.cross(r_b, perp)
err = wp.length(friction_delta)
if err > 0.0:
lambda_fr = compute_contact_constraint_delta(
err, X_wb_a, X_wb_b, m_inv_a, m_inv_b, I_inv_a, I_inv_b, -perp, perp, angular_a, angular_b, 1.0, dt
)
            # limit friction based on the incremental normal force, a good approximation to limiting the total force
lambda_fr = wp.max(lambda_fr, -lambda_n * mu)
lin_delta_a -= perp * lambda_fr
lin_delta_b += perp * lambda_fr
ang_delta_a += angular_a * lambda_fr
ang_delta_b += angular_b * lambda_fr
torsional_friction = mu * contact_torsional_friction
delta_omega = omega_b - omega_a
if torsional_friction > 0.0:
err = wp.dot(delta_omega, n) * dt
if wp.abs(err) > 0.0:
lin = wp.vec3(0.0)
lambda_torsion = compute_contact_constraint_delta(
err, X_wb_a, X_wb_b, m_inv_a, m_inv_b, I_inv_a, I_inv_b, lin, lin, -n, n, 1.0, dt
)
lambda_torsion = wp.clamp(lambda_torsion, -lambda_n * torsional_friction, lambda_n * torsional_friction)
ang_delta_a -= n * lambda_torsion
ang_delta_b += n * lambda_torsion
rolling_friction = mu * contact_rolling_friction
if rolling_friction > 0.0:
delta_omega -= wp.dot(n, delta_omega) * n
err = wp.length(delta_omega) * dt
if err > 0.0:
lin = wp.vec3(0.0)
roll_n = wp.normalize(delta_omega)
lambda_roll = compute_contact_constraint_delta(
err, X_wb_a, X_wb_b, m_inv_a, m_inv_b, I_inv_a, I_inv_b, lin, lin, -roll_n, roll_n, 1.0, dt
)
lambda_roll = wp.max(lambda_roll, -lambda_n * rolling_friction)
ang_delta_a -= roll_n * lambda_roll
ang_delta_b += roll_n * lambda_roll
if body_a >= 0:
wp.atomic_add(deltas, body_a, wp.spatial_vector(ang_delta_a, lin_delta_a))
if body_b >= 0:
wp.atomic_add(deltas, body_b, wp.spatial_vector(ang_delta_b, lin_delta_b))
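# Reconstructs body velocities from the pose change over the step: the linear velocity
# follows from the displacement of the center of mass, the angular velocity from the
# relative rotation dq = q * q_prev^-1 via omega = 2 * vec(dq) / dt (sign-flipped to
# take the shorter arc).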
@wp.kernel
def update_body_velocities(
poses: wp.array(dtype=wp.transform),
poses_prev: wp.array(dtype=wp.transform),
body_com: wp.array(dtype=wp.vec3),
dt: float,
qd_out: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
pose = poses[tid]
pose_prev = poses_prev[tid]
x = wp.transform_get_translation(pose)
x_prev = wp.transform_get_translation(pose_prev)
q = wp.transform_get_rotation(pose)
q_prev = wp.transform_get_rotation(pose_prev)
# Update body velocities according to Alg. 2
# XXX we consider the body COM as the origin of the body frame
x_com = x + wp.quat_rotate(q, body_com[tid])
x_com_prev = x_prev + wp.quat_rotate(q_prev, body_com[tid])
# XXX consider the velocity of the COM
v = (x_com - x_com_prev) / dt
dq = q * wp.quat_inverse(q_prev)
omega = 2.0 / dt * wp.vec3(dq[0], dq[1], dq[2])
if dq[3] < 0.0:
omega = -omega
qd_out[tid] = wp.spatial_vector(omega, v)
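# Velocity-level restitution pass: compares the pre-solve normal velocity at each
# contact (with this step's gravity applied) against the post-solve velocity and adds
# a velocity change dv = (-v_n_new - restitution * v_n_old) / w to both bodies
# (with opposite signs).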
@wp.kernel
def apply_rigid_restitution(
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_q_prev: wp.array(dtype=wp.transform),
body_qd_prev: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
body_m_inv: wp.array(dtype=float),
body_I_inv: wp.array(dtype=wp.mat33),
shape_body: wp.array(dtype=int),
contact_count: wp.array(dtype=int),
contact_normal: wp.array(dtype=wp.vec3),
contact_shape0: wp.array(dtype=int),
contact_shape1: wp.array(dtype=int),
shape_materials: ModelShapeMaterials,
contact_point0: wp.array(dtype=wp.vec3),
contact_point1: wp.array(dtype=wp.vec3),
contact_offset0: wp.array(dtype=wp.vec3),
contact_offset1: wp.array(dtype=wp.vec3),
contact_thickness: wp.array(dtype=float),
contact_inv_weight: wp.array(dtype=float),
gravity: wp.vec3,
dt: float,
# outputs
deltas: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
count = contact_count[0]
if tid >= count:
return
shape_a = contact_shape0[tid]
shape_b = contact_shape1[tid]
if shape_a == shape_b:
return
body_a = -1
body_b = -1
# use average contact material properties
mat_nonzero = 0
restitution = 0.0
if shape_a >= 0:
mat_nonzero += 1
restitution += shape_materials.restitution[shape_a]
body_a = shape_body[shape_a]
if shape_b >= 0:
mat_nonzero += 1
restitution += shape_materials.restitution[shape_b]
body_b = shape_body[shape_b]
if mat_nonzero > 0:
restitution /= float(mat_nonzero)
if body_a == body_b:
return
m_inv_a = 0.0
m_inv_b = 0.0
I_inv_a = wp.mat33(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
I_inv_b = wp.mat33(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
# body to world transform
X_wb_a_prev = wp.transform_identity()
X_wb_b_prev = wp.transform_identity()
# center of mass in body frame
com_a = wp.vec3(0.0)
com_b = wp.vec3(0.0)
# previous velocity at contact points
v_a = wp.vec3(0.0)
v_b = wp.vec3(0.0)
# new velocity at contact points
v_a_new = wp.vec3(0.0)
v_b_new = wp.vec3(0.0)
# inverse mass used to compute the impulse
inv_mass = 0.0
if body_a >= 0:
X_wb_a_prev = body_q_prev[body_a]
# X_wb_a = body_q[body_a]
m_inv_a = body_m_inv[body_a]
I_inv_a = body_I_inv[body_a]
com_a = body_com[body_a]
if body_b >= 0:
X_wb_b_prev = body_q_prev[body_b]
# X_wb_b = body_q[body_b]
m_inv_b = body_m_inv[body_b]
I_inv_b = body_I_inv[body_b]
com_b = body_com[body_b]
# compute body position in world space
bx_a = wp.transform_point(X_wb_a_prev, contact_point0[tid] + contact_offset0[tid])
bx_b = wp.transform_point(X_wb_b_prev, contact_point1[tid] + contact_offset1[tid])
thickness = contact_thickness[tid]
n = contact_normal[tid]
d = -wp.dot(n, bx_b - bx_a) - thickness
if d >= 0.0:
return
r_a = bx_a - wp.transform_point(X_wb_a_prev, com_a)
r_b = bx_b - wp.transform_point(X_wb_b_prev, com_b)
rxn_a = wp.vec3(0.0)
rxn_b = wp.vec3(0.0)
if body_a >= 0:
v_a = velocity_at_point(body_qd_prev[body_a], r_a) + gravity * dt
v_a_new = velocity_at_point(body_qd[body_a], r_a)
q_a = wp.transform_get_rotation(X_wb_a_prev)
rxn_a = wp.quat_rotate_inv(q_a, wp.cross(r_a, n))
# Eq. 2
inv_mass_a = m_inv_a + wp.dot(rxn_a, I_inv_a * rxn_a)
# if contact_inv_weight:
# if contact_inv_weight[body_a] > 0.0:
# inv_mass_a *= contact_inv_weight[body_a]
inv_mass += inv_mass_a
if body_b >= 0:
v_b = velocity_at_point(body_qd_prev[body_b], r_b) + gravity * dt
v_b_new = velocity_at_point(body_qd[body_b], r_b)
q_b = wp.transform_get_rotation(X_wb_b_prev)
rxn_b = wp.quat_rotate_inv(q_b, wp.cross(r_b, n))
# Eq. 3
inv_mass_b = m_inv_b + wp.dot(rxn_b, I_inv_b * rxn_b)
# if contact_inv_weight:
# if contact_inv_weight[body_b] > 0.0:
# inv_mass_b *= contact_inv_weight[body_b]
inv_mass += inv_mass_b
if inv_mass == 0.0:
return
# Eq. 29
rel_vel_old = wp.dot(n, v_a - v_b)
rel_vel_new = wp.dot(n, v_a_new - v_b_new)
if rel_vel_old >= 0.0:
return
# Eq. 34
dv = (-rel_vel_new - restitution * rel_vel_old) / inv_mass
# Eq. 33
if body_a >= 0:
dv_a = dv
# if contact_inv_weight:
# if contact_inv_weight[body_a] > 0.0:
# dv_a *= contact_inv_weight[body_a]
q_a = wp.transform_get_rotation(X_wb_a_prev)
dq = wp.quat_rotate(q_a, I_inv_a * rxn_a * dv_a)
wp.atomic_add(deltas, body_a, wp.spatial_vector(dq, n * m_inv_a * dv_a))
if body_b >= 0:
dv_b = -dv
# if contact_inv_weight:
# if contact_inv_weight[body_b] > 0.0:
# dv_b *= contact_inv_weight[body_b]
q_b = wp.transform_get_rotation(X_wb_b_prev)
dq = wp.quat_rotate(q_b, I_inv_b * rxn_b * dv_b)
wp.atomic_add(deltas, body_b, wp.spatial_vector(dq, n * m_inv_b * dv_b))
class XPBDIntegrator(Integrator):
"""An implicit integrator using eXtended Position-Based Dynamics (XPBD) for rigid and soft body simulation.
References:
- Miles Macklin, Matthias Müller, and Nuttapong Chentanez. 2016. XPBD: position-based simulation of compliant constrained dynamics. In Proceedings of the 9th International Conference on Motion in Games (MIG '16). Association for Computing Machinery, New York, NY, USA, 49-54. https://doi.org/10.1145/2994258.2994272
- Matthias Müller, Miles Macklin, Nuttapong Chentanez, Stefan Jeschke, and Tae-Yong Kim. 2020. Detailed rigid body simulation with extended position based dynamics. In Proceedings of the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA '20). Eurographics Association, Goslar, DEU, Article 10, 1-12. https://doi.org/10.1111/cgf.14105
After constructing :class:`Model`, :class:`State`, and :class:`Control` (optional) objects, this time-integrator
may be used to advance the simulation state forward in time.
Example
-------
.. code-block:: python
        integrator = wp.sim.XPBDIntegrator()
# simulation loop
for i in range(100):
state = integrator.simulate(model, state_in, state_out, dt, control)
"""
def __init__(
self,
iterations=2,
soft_body_relaxation=0.9,
soft_contact_relaxation=0.9,
joint_linear_relaxation=0.7,
joint_angular_relaxation=0.4,
rigid_contact_relaxation=0.8,
rigid_contact_con_weighting=True,
angular_damping=0.0,
enable_restitution=False,
):
self.iterations = iterations
self.soft_body_relaxation = soft_body_relaxation
self.soft_contact_relaxation = soft_contact_relaxation
self.joint_linear_relaxation = joint_linear_relaxation
self.joint_angular_relaxation = joint_angular_relaxation
self.rigid_contact_relaxation = rigid_contact_relaxation
self.rigid_contact_con_weighting = rigid_contact_con_weighting
self.angular_damping = angular_damping
self.enable_restitution = enable_restitution
self.compute_body_velocity_from_position_delta = False
        # helper counters used to ping-pong between state buffers during constraint resolution
self._particle_delta_counter = 0
self._body_delta_counter = 0
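    # The apply_*_deltas helpers below ping-pong between the state_in and state_out
    # arrays (tracked by the delta counters) to avoid extra allocations when gradients
    # are not required; in the differentiable path, fresh arrays are allocated or
    # cloned instead so the tape records distinct inputs and outputs.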
def apply_particle_deltas(
self,
model: Model,
state_in: State,
state_out: State,
particle_deltas: wp.array,
dt: float,
):
if state_in.requires_grad:
particle_q = state_out.particle_q
# allocate new particle arrays so gradients can be tracked correctly without overwriting
new_particle_q = wp.empty_like(state_out.particle_q)
new_particle_qd = wp.empty_like(state_out.particle_qd)
self._particle_delta_counter += 1
else:
if self._particle_delta_counter == 0:
particle_q = state_out.particle_q
new_particle_q = state_in.particle_q
new_particle_qd = state_in.particle_qd
else:
particle_q = state_in.particle_q
new_particle_q = state_out.particle_q
new_particle_qd = state_out.particle_qd
self._particle_delta_counter = 1 - self._particle_delta_counter
wp.launch(
kernel=apply_particle_deltas,
dim=model.particle_count,
inputs=[
self.particle_q_init,
particle_q,
model.particle_flags,
particle_deltas,
dt,
model.particle_max_velocity,
],
outputs=[new_particle_q, new_particle_qd],
device=model.device,
)
if state_in.requires_grad:
state_out.particle_q = new_particle_q
state_out.particle_qd = new_particle_qd
return new_particle_q, new_particle_qd
def apply_body_deltas(
self,
model: Model,
state_in: State,
state_out: State,
body_deltas: wp.array,
dt: float,
rigid_contact_inv_weight: wp.array = None,
):
with wp.ScopedTimer("apply_body_deltas", False):
if state_in.requires_grad:
body_q = state_out.body_q
body_qd = state_out.body_qd
new_body_q = wp.clone(body_q)
new_body_qd = wp.clone(body_qd)
self._body_delta_counter += 1
else:
if self._body_delta_counter == 0:
body_q = state_out.body_q
body_qd = state_out.body_qd
new_body_q = state_in.body_q
new_body_qd = state_in.body_qd
else:
body_q = state_in.body_q
body_qd = state_in.body_qd
new_body_q = state_out.body_q
new_body_qd = state_out.body_qd
self._body_delta_counter = 1 - self._body_delta_counter
wp.launch(
kernel=apply_body_deltas,
dim=model.body_count,
inputs=[
body_q,
body_qd,
model.body_com,
model.body_inertia,
model.body_inv_mass,
model.body_inv_inertia,
body_deltas,
rigid_contact_inv_weight,
dt,
],
outputs=[
new_body_q,
new_body_qd,
],
device=model.device,
)
if state_in.requires_grad:
state_out.body_q = new_body_q
state_out.body_qd = new_body_qd
return new_body_q, new_body_qd
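    # One XPBD step: integrate particles and bodies under external and joint forces,
    # run `iterations` rounds of constraint projection (particle contacts, springs,
    # bending, FEM, joints, rigid contacts), write the corrected states back, and
    # optionally apply velocity-level restitution at the end.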
def simulate(self, model: Model, state_in: State, state_out: State, dt: float, control: Control = None):
requires_grad = state_in.requires_grad
self._particle_delta_counter = 0
self._body_delta_counter = 0
particle_q = None
particle_qd = None
particle_deltas = None
body_q = None
body_qd = None
body_deltas = None
rigid_contact_inv_weight = None
if model.rigid_contact_max > 0:
if self.rigid_contact_con_weighting:
rigid_contact_inv_weight = wp.zeros_like(model.rigid_contact_thickness)
rigid_contact_inv_weight_init = None
if control is None:
control = model.control(clone_variables=False)
with wp.ScopedTimer("simulate", False):
if model.particle_count:
                particle_q = state_out.particle_q
                particle_qd = state_out.particle_qd
self.particle_q_init = wp.clone(state_in.particle_q)
if self.enable_restitution:
self.particle_qd_init = wp.clone(state_in.particle_qd)
particle_deltas = wp.empty_like(state_out.particle_qd)
self.integrate_particles(model, state_in, state_out, dt)
if model.body_count:
body_q = state_out.body_q
body_qd = state_out.body_qd
if self.compute_body_velocity_from_position_delta or self.enable_restitution:
body_q_init = wp.clone(state_in.body_q)
body_qd_init = wp.clone(state_in.body_qd)
body_deltas = wp.empty_like(state_out.body_qd)
if model.joint_count:
wp.launch(
kernel=apply_joint_torques,
dim=model.joint_count,
inputs=[
state_in.body_q,
model.body_com,
model.joint_q_start,
model.joint_qd_start,
model.joint_type,
model.joint_parent,
model.joint_child,
model.joint_X_p,
model.joint_X_c,
model.joint_axis_start,
model.joint_axis_dim,
model.joint_axis,
model.joint_axis_mode,
control.joint_act,
],
outputs=[state_in.body_f],
device=model.device,
)
self.integrate_bodies(model, state_in, state_out, dt, self.angular_damping)
spring_constraint_lambdas = None
if model.spring_count:
spring_constraint_lambdas = wp.empty_like(model.spring_rest_length)
edge_constraint_lambdas = None
if model.edge_count:
edge_constraint_lambdas = wp.empty_like(model.edge_rest_angle)
for i in range(self.iterations):
with wp.ScopedTimer(f"iteration_{i}", False):
if model.body_count:
if requires_grad and i > 0:
body_deltas = wp.zeros_like(body_deltas)
else:
body_deltas.zero_()
if model.particle_count:
if requires_grad and i > 0:
particle_deltas = wp.zeros_like(particle_deltas)
else:
particle_deltas.zero_()
# particle ground contact
if model.ground:
wp.launch(
kernel=solve_particle_ground_contacts,
dim=model.particle_count,
inputs=[
particle_q,
particle_qd,
model.particle_inv_mass,
model.particle_radius,
model.particle_flags,
model.soft_contact_ke,
model.soft_contact_kd,
model.soft_contact_kf,
model.soft_contact_mu,
model.ground_plane,
dt,
self.soft_contact_relaxation,
],
outputs=[particle_deltas],
device=model.device,
)
# particle-rigid body contacts (besides ground plane)
if model.shape_count > 1:
wp.launch(
kernel=solve_particle_shape_contacts,
dim=model.soft_contact_max,
inputs=[
particle_q,
particle_qd,
model.particle_inv_mass,
model.particle_radius,
model.particle_flags,
body_q,
body_qd,
model.body_com,
model.body_inv_mass,
model.body_inv_inertia,
model.shape_body,
model.shape_materials,
model.soft_contact_mu,
model.particle_adhesion,
model.soft_contact_count,
model.soft_contact_particle,
model.soft_contact_shape,
model.soft_contact_body_pos,
model.soft_contact_body_vel,
model.soft_contact_normal,
model.soft_contact_max,
dt,
self.soft_contact_relaxation,
],
# outputs
outputs=[particle_deltas, body_deltas],
device=model.device,
)
if model.particle_max_radius > 0.0 and model.particle_count > 1:
# assert model.particle_grid.reserved, "model.particle_grid must be built, see HashGrid.build()"
wp.launch(
kernel=solve_particle_particle_contacts,
dim=model.particle_count,
inputs=[
model.particle_grid.id,
particle_q,
particle_qd,
model.particle_inv_mass,
model.particle_radius,
model.particle_flags,
model.particle_mu,
model.particle_cohesion,
model.particle_max_radius,
dt,
self.soft_contact_relaxation,
],
outputs=[particle_deltas],
device=model.device,
)
# distance constraints
if model.spring_count:
spring_constraint_lambdas.zero_()
wp.launch(
kernel=solve_springs,
dim=model.spring_count,
inputs=[
particle_q,
particle_qd,
model.particle_inv_mass,
model.spring_indices,
model.spring_rest_length,
model.spring_stiffness,
model.spring_damping,
dt,
spring_constraint_lambdas,
],
outputs=[particle_deltas],
device=model.device,
)
# bending constraints
if model.edge_count:
edge_constraint_lambdas.zero_()
wp.launch(
kernel=bending_constraint,
dim=model.edge_count,
inputs=[
particle_q,
particle_qd,
model.particle_inv_mass,
model.edge_indices,
model.edge_rest_angle,
model.edge_bending_properties,
dt,
edge_constraint_lambdas,
],
outputs=[particle_deltas],
device=model.device,
)
# tetrahedral FEM
if model.tet_count:
wp.launch(
kernel=solve_tetrahedra,
dim=model.tet_count,
inputs=[
particle_q,
particle_qd,
model.particle_inv_mass,
model.tet_indices,
model.tet_poses,
model.tet_activations,
model.tet_materials,
dt,
self.soft_body_relaxation,
],
outputs=[particle_deltas],
device=model.device,
)
particle_q, particle_qd = self.apply_particle_deltas(
model, state_in, state_out, particle_deltas, dt
)
# handle rigid bodies
# ----------------------------
if model.joint_count:
# wp.launch(
# kernel=solve_simple_body_joints,
# dim=model.joint_count,
# inputs=[
# body_q,
# body_qd,
# model.body_com,
# model.body_inv_mass,
# model.body_inv_inertia,
# model.joint_type,
# model.joint_enabled,
# model.joint_parent,
# model.joint_child,
# model.joint_X_p,
# model.joint_X_c,
# model.joint_limit_lower,
# model.joint_limit_upper,
# model.joint_axis_start,
# model.joint_axis_dim,
# model.joint_axis_mode,
# model.joint_axis,
# control.joint_target,
# model.joint_target_ke,
# model.joint_target_kd,
# model.joint_linear_compliance,
# model.joint_angular_compliance,
# self.joint_angular_relaxation,
# self.joint_linear_relaxation,
# dt,
# ],
# outputs=[body_deltas],
# device=model.device,
# )
wp.launch(
kernel=solve_body_joints,
dim=model.joint_count,
inputs=[
body_q,
body_qd,
model.body_com,
model.body_inv_mass,
model.body_inv_inertia,
model.joint_type,
model.joint_enabled,
model.joint_parent,
model.joint_child,
model.joint_X_p,
model.joint_X_c,
model.joint_limit_lower,
model.joint_limit_upper,
model.joint_axis_start,
model.joint_axis_dim,
model.joint_axis_mode,
model.joint_axis,
control.joint_act,
model.joint_target_ke,
model.joint_target_kd,
model.joint_linear_compliance,
model.joint_angular_compliance,
self.joint_angular_relaxation,
self.joint_linear_relaxation,
dt,
],
outputs=[body_deltas],
device=model.device,
)
body_q, body_qd = self.apply_body_deltas(model, state_in, state_out, body_deltas, dt)
# Solve rigid contact constraints
if model.rigid_contact_max and (
                        (model.ground and model.shape_ground_contact_pair_count) or model.shape_contact_pair_count
):
if self.rigid_contact_con_weighting:
rigid_contact_inv_weight.zero_()
body_deltas.zero_()
wp.launch(
kernel=solve_body_contact_positions,
dim=model.rigid_contact_max,
inputs=[
body_q,
body_qd,
model.body_com,
model.body_inv_mass,
model.body_inv_inertia,
model.shape_body,
model.rigid_contact_count,
model.rigid_contact_point0,
model.rigid_contact_point1,
model.rigid_contact_offset0,
model.rigid_contact_offset1,
model.rigid_contact_normal,
model.rigid_contact_thickness,
model.rigid_contact_shape0,
model.rigid_contact_shape1,
model.shape_materials,
self.rigid_contact_relaxation,
dt,
model.rigid_contact_torsional_friction,
model.rigid_contact_rolling_friction,
],
outputs=[
body_deltas,
rigid_contact_inv_weight,
],
device=model.device,
)
# if model.rigid_contact_count.numpy()[0] > 0:
# print("rigid_contact_count:", model.rigid_contact_count.numpy().flatten())
# # print("rigid_active_contact_distance:", rigid_active_contact_distance.numpy().flatten())
# # print("rigid_active_contact_point0:", rigid_active_contact_point0.numpy().flatten())
# # print("rigid_active_contact_point1:", rigid_active_contact_point1.numpy().flatten())
# print("body_deltas:", body_deltas.numpy().flatten())
# print(rigid_active_contact_distance.numpy().flatten())
if self.enable_restitution and i == 0:
# remember contact constraint weighting from the first iteration
if self.rigid_contact_con_weighting:
rigid_contact_inv_weight_init = wp.clone(rigid_contact_inv_weight)
else:
rigid_contact_inv_weight_init = None
body_q, body_qd = self.apply_body_deltas(
model, state_in, state_out, body_deltas, dt, rigid_contact_inv_weight
)
if model.particle_count:
if particle_q.ptr != state_out.particle_q.ptr:
state_out.particle_q.assign(particle_q)
state_out.particle_qd.assign(particle_qd)
if model.body_count:
if body_q.ptr != state_out.body_q.ptr:
state_out.body_q.assign(body_q)
state_out.body_qd.assign(body_qd)
# update body velocities from position changes
if self.compute_body_velocity_from_position_delta and model.body_count and not requires_grad:
# causes gradient issues (probably due to numerical problems
# when computing velocities from position changes)
if requires_grad:
out_body_qd = wp.clone(state_out.body_qd)
else:
out_body_qd = state_out.body_qd
# update body velocities
wp.launch(
kernel=update_body_velocities,
dim=model.body_count,
inputs=[state_out.body_q, body_q_init, model.body_com, dt],
outputs=[out_body_qd],
device=model.device,
)
if self.enable_restitution:
if model.particle_count:
wp.launch(
kernel=apply_particle_shape_restitution,
dim=model.particle_count,
inputs=[
particle_q,
particle_qd,
self.particle_q_init,
self.particle_qd_init,
model.particle_inv_mass,
model.particle_radius,
model.particle_flags,
body_q,
body_qd,
model.body_com,
model.body_inv_mass,
model.body_inv_inertia,
model.shape_body,
model.shape_materials,
model.particle_adhesion,
model.soft_contact_restitution,
model.soft_contact_count,
model.soft_contact_particle,
model.soft_contact_shape,
model.soft_contact_body_pos,
model.soft_contact_body_vel,
model.soft_contact_normal,
model.soft_contact_max,
dt,
self.soft_contact_relaxation,
],
outputs=[state_out.particle_qd],
device=model.device,
)
if model.ground:
wp.launch(
kernel=apply_particle_ground_restitution,
dim=model.particle_count,
inputs=[
particle_q,
particle_qd,
self.particle_q_init,
self.particle_qd_init,
model.particle_inv_mass,
model.particle_radius,
model.particle_flags,
model.particle_adhesion,
model.soft_contact_restitution,
model.ground_plane,
dt,
self.soft_contact_relaxation,
],
outputs=[state_out.particle_qd],
device=model.device,
)
if model.body_count:
body_deltas.zero_()
wp.launch(
kernel=apply_rigid_restitution,
dim=model.rigid_contact_max,
inputs=[
state_out.body_q,
state_out.body_qd,
body_q_init,
body_qd_init,
model.body_com,
model.body_inv_mass,
model.body_inv_inertia,
model.shape_body,
model.rigid_contact_count,
model.rigid_contact_normal,
model.rigid_contact_shape0,
model.rigid_contact_shape1,
model.shape_materials,
model.rigid_contact_point0,
model.rigid_contact_point1,
model.rigid_contact_offset0,
model.rigid_contact_offset1,
model.rigid_contact_thickness,
rigid_contact_inv_weight_init,
model.gravity,
dt,
],
outputs=[
body_deltas,
],
device=model.device,
)
wp.launch(
kernel=apply_body_delta_velocities,
dim=model.body_count,
inputs=[
body_deltas,
],
outputs=[state_out.body_qd],
device=model.device,
)
return state_out
| 115,420 | Python | 34.029135 | 354 | 0.512823 |
NVIDIA/warp/warp/sim/particles.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
from .model import PARTICLE_FLAG_ACTIVE
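# Spring-damper contact model for a single particle pair: the normal force combines a
# penetration term c * k_n with a damping term that only acts while the particles
# approach each other, and the tangential (friction) force is capped by the Coulomb
# limit k_mu * |fn|.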
@wp.func
def particle_force(n: wp.vec3, v: wp.vec3, c: float, k_n: float, k_d: float, k_f: float, k_mu: float):
# compute normal and tangential friction force for a single contact
vn = wp.dot(n, v)
jn = c * k_n
jd = min(vn, 0.0) * k_d
# contact force
fn = jn + jd
# friction force
vt = v - n * vn
vs = wp.length(vt)
if vs > 0.0:
vt = vt / vs
# Coulomb condition
ft = wp.min(vs * k_f, k_mu * wp.abs(fn))
# total force
return -n * fn - vt * ft
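# Accumulates particle-particle forces using the hash grid: each particle queries
# neighbors within radius + max_radius + k_cohesion and sums the pairwise forces; a
# small positive separation (up to k_cohesion) still yields an attractive cohesion
# response through the same force model.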
@wp.kernel
def eval_particle_forces_kernel(
grid: wp.uint64,
particle_x: wp.array(dtype=wp.vec3),
particle_v: wp.array(dtype=wp.vec3),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
k_contact: float,
k_damp: float,
k_friction: float,
k_mu: float,
k_cohesion: float,
max_radius: float,
# outputs
particle_f: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
# order threads by cell
i = wp.hash_grid_point_id(grid, tid)
if i == -1:
# hash grid has not been built yet
return
if (particle_flags[i] & PARTICLE_FLAG_ACTIVE) == 0:
return
x = particle_x[i]
v = particle_v[i]
radius = particle_radius[i]
f = wp.vec3()
# particle contact
query = wp.hash_grid_query(grid, x, radius + max_radius + k_cohesion)
index = int(0)
count = int(0)
while wp.hash_grid_query_next(query, index):
if (particle_flags[index] & PARTICLE_FLAG_ACTIVE) != 0 and index != i:
# compute distance to point
n = x - particle_x[index]
d = wp.length(n)
err = d - radius - particle_radius[index]
count += 1
if err <= k_cohesion:
n = n / d
vrel = v - particle_v[index]
f = f + particle_force(n, vrel, err, k_contact, k_damp, k_friction, k_mu)
particle_f[i] = f
def eval_particle_forces(model, state, forces):
if model.particle_count > 1 and model.particle_max_radius > 0.0:
wp.launch(
kernel=eval_particle_forces_kernel,
dim=model.particle_count,
inputs=[
model.particle_grid.id,
state.particle_q,
state.particle_qd,
model.particle_radius,
model.particle_flags,
model.particle_ke,
model.particle_kd,
model.particle_kf,
model.particle_mu,
model.particle_cohesion,
model.particle_max_radius,
],
outputs=[forces],
device=model.device,
)
| 3,165 | Python | 26.77193 | 102 | 0.576935 |
NVIDIA/warp/warp/sim/collide.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Collision handling functions and kernels.
"""
import warp as wp
from .model import PARTICLE_FLAG_ACTIVE, ModelShapeGeometry
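# Returns the barycentric coordinates of the point on triangle (a, b, c) closest to p,
# using the standard Voronoi-region case analysis (cf. Ericson, "Real-Time Collision
# Detection").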
@wp.func
def triangle_closest_point_barycentric(a: wp.vec3, b: wp.vec3, c: wp.vec3, p: wp.vec3):
ab = b - a
ac = c - a
ap = p - a
d1 = wp.dot(ab, ap)
d2 = wp.dot(ac, ap)
if d1 <= 0.0 and d2 <= 0.0:
return wp.vec3(1.0, 0.0, 0.0)
bp = p - b
d3 = wp.dot(ab, bp)
d4 = wp.dot(ac, bp)
if d3 >= 0.0 and d4 <= d3:
return wp.vec3(0.0, 1.0, 0.0)
vc = d1 * d4 - d3 * d2
v = d1 / (d1 - d3)
if vc <= 0.0 and d1 >= 0.0 and d3 <= 0.0:
return wp.vec3(1.0 - v, v, 0.0)
cp = p - c
d5 = wp.dot(ab, cp)
d6 = wp.dot(ac, cp)
if d6 >= 0.0 and d5 <= d6:
return wp.vec3(0.0, 0.0, 1.0)
vb = d5 * d2 - d1 * d6
w = d2 / (d2 - d6)
if vb <= 0.0 and d2 >= 0.0 and d6 <= 0.0:
return wp.vec3(1.0 - w, 0.0, w)
va = d3 * d6 - d5 * d4
w = (d4 - d3) / ((d4 - d3) + (d5 - d6))
if va <= 0.0 and (d4 - d3) >= 0.0 and (d5 - d6) >= 0.0:
return wp.vec3(0.0, w, 1.0 - w)
denom = 1.0 / (va + vb + vc)
v = vb * denom
w = vc * denom
return wp.vec3(1.0 - v - w, v, w)
@wp.func
def sphere_sdf(center: wp.vec3, radius: float, p: wp.vec3):
return wp.length(p - center) - radius
@wp.func
def sphere_sdf_grad(center: wp.vec3, radius: float, p: wp.vec3):
return wp.normalize(p - center)
@wp.func
def box_sdf(upper: wp.vec3, p: wp.vec3):
# adapted from https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm
qx = abs(p[0]) - upper[0]
qy = abs(p[1]) - upper[1]
qz = abs(p[2]) - upper[2]
e = wp.vec3(wp.max(qx, 0.0), wp.max(qy, 0.0), wp.max(qz, 0.0))
return wp.length(e) + wp.min(wp.max(qx, wp.max(qy, qz)), 0.0)
@wp.func
def box_sdf_grad(upper: wp.vec3, p: wp.vec3):
qx = abs(p[0]) - upper[0]
qy = abs(p[1]) - upper[1]
qz = abs(p[2]) - upper[2]
# exterior case
if qx > 0.0 or qy > 0.0 or qz > 0.0:
x = wp.clamp(p[0], -upper[0], upper[0])
y = wp.clamp(p[1], -upper[1], upper[1])
z = wp.clamp(p[2], -upper[2], upper[2])
return wp.normalize(p - wp.vec3(x, y, z))
sx = wp.sign(p[0])
sy = wp.sign(p[1])
sz = wp.sign(p[2])
# x projection
if qx > qy and qx > qz or qy == 0.0 and qz == 0.0:
return wp.vec3(sx, 0.0, 0.0)
# y projection
if qy > qx and qy > qz or qx == 0.0 and qz == 0.0:
return wp.vec3(0.0, sy, 0.0)
# z projection
return wp.vec3(0.0, 0.0, sz)
@wp.func
def capsule_sdf(radius: float, half_height: float, p: wp.vec3):
if p[1] > half_height:
return wp.length(wp.vec3(p[0], p[1] - half_height, p[2])) - radius
if p[1] < -half_height:
return wp.length(wp.vec3(p[0], p[1] + half_height, p[2])) - radius
return wp.length(wp.vec3(p[0], 0.0, p[2])) - radius
@wp.func
def capsule_sdf_grad(radius: float, half_height: float, p: wp.vec3):
if p[1] > half_height:
return wp.normalize(wp.vec3(p[0], p[1] - half_height, p[2]))
if p[1] < -half_height:
return wp.normalize(wp.vec3(p[0], p[1] + half_height, p[2]))
return wp.normalize(wp.vec3(p[0], 0.0, p[2]))
@wp.func
def cylinder_sdf(radius: float, half_height: float, p: wp.vec3):
dx = wp.length(wp.vec3(p[0], 0.0, p[2])) - radius
dy = wp.abs(p[1]) - half_height
return wp.min(wp.max(dx, dy), 0.0) + wp.length(wp.vec2(wp.max(dx, 0.0), wp.max(dy, 0.0)))
@wp.func
def cylinder_sdf_grad(radius: float, half_height: float, p: wp.vec3):
dx = wp.length(wp.vec3(p[0], 0.0, p[2])) - radius
dy = wp.abs(p[1]) - half_height
if dx > dy:
return wp.normalize(wp.vec3(p[0], 0.0, p[2]))
return wp.vec3(0.0, wp.sign(p[1]), 0.0)
@wp.func
def cone_sdf(radius: float, half_height: float, p: wp.vec3):
dx = wp.length(wp.vec3(p[0], 0.0, p[2])) - radius * (p[1] + half_height) / (2.0 * half_height)
dy = wp.abs(p[1]) - half_height
return wp.min(wp.max(dx, dy), 0.0) + wp.length(wp.vec2(wp.max(dx, 0.0), wp.max(dy, 0.0)))
@wp.func
def cone_sdf_grad(radius: float, half_height: float, p: wp.vec3):
dx = wp.length(wp.vec3(p[0], 0.0, p[2])) - radius * (p[1] + half_height) / (2.0 * half_height)
dy = wp.abs(p[1]) - half_height
if dy < 0.0 or dx == 0.0:
return wp.vec3(0.0, wp.sign(p[1]), 0.0)
return wp.normalize(wp.vec3(p[0], 0.0, p[2])) + wp.vec3(0.0, radius / (2.0 * half_height), 0.0)
@wp.func
def plane_sdf(width: float, length: float, p: wp.vec3):
# SDF for a quad in the xz plane
if width > 0.0 and length > 0.0:
d = wp.max(wp.abs(p[0]) - width, wp.abs(p[2]) - length)
return wp.max(d, wp.abs(p[1]))
return p[1]
@wp.func
def closest_point_plane(width: float, length: float, point: wp.vec3):
# projects the point onto the quad in the xz plane (if width and length > 0.0, otherwise the plane is infinite)
if width > 0.0:
x = wp.clamp(point[0], -width, width)
else:
x = point[0]
if length > 0.0:
z = wp.clamp(point[2], -length, length)
else:
z = point[2]
return wp.vec3(x, 0.0, z)
@wp.func
def closest_point_line_segment(a: wp.vec3, b: wp.vec3, point: wp.vec3):
ab = b - a
ap = point - a
t = wp.dot(ap, ab) / wp.dot(ab, ab)
t = wp.clamp(t, 0.0, 1.0)
return a + t * ab
@wp.func
def closest_point_box(upper: wp.vec3, point: wp.vec3):
# closest point to box surface
x = wp.clamp(point[0], -upper[0], upper[0])
y = wp.clamp(point[1], -upper[1], upper[1])
z = wp.clamp(point[2], -upper[2], upper[2])
if wp.abs(point[0]) <= upper[0] and wp.abs(point[1]) <= upper[1] and wp.abs(point[2]) <= upper[2]:
# the point is inside, find closest face
sx = wp.abs(wp.abs(point[0]) - upper[0])
sy = wp.abs(wp.abs(point[1]) - upper[1])
sz = wp.abs(wp.abs(point[2]) - upper[2])
# return closest point on closest side, handle corner cases
if sx < sy and sx < sz or sy == 0.0 and sz == 0.0:
x = wp.sign(point[0]) * upper[0]
elif sy < sx and sy < sz or sx == 0.0 and sz == 0.0:
y = wp.sign(point[1]) * upper[1]
else:
z = wp.sign(point[2]) * upper[2]
return wp.vec3(x, y, z)
@wp.func
def get_box_vertex(point_id: int, upper: wp.vec3):
# box vertex numbering:
# 6---7
# |\ |\ y
# | 2-+-3 |
# 4-+-5 | z \|
# \| \| o---x
# 0---1
# get the vertex of the box given its ID (0-7)
sign_x = float(point_id % 2) * 2.0 - 1.0
sign_y = float((point_id // 2) % 2) * 2.0 - 1.0
sign_z = float((point_id // 4) % 2) * 2.0 - 1.0
return wp.vec3(sign_x * upper[0], sign_y * upper[1], sign_z * upper[2])
@wp.func
def get_box_edge(edge_id: int, upper: wp.vec3):
# get the edge of the box given its ID (0-11)
if edge_id < 4:
# edges along x: 0-1, 2-3, 4-5, 6-7
i = edge_id * 2
j = i + 1
return wp.spatial_vector(get_box_vertex(i, upper), get_box_vertex(j, upper))
elif edge_id < 8:
# edges along y: 0-2, 1-3, 4-6, 5-7
edge_id -= 4
i = edge_id % 2 + edge_id // 2 * 4
j = i + 2
return wp.spatial_vector(get_box_vertex(i, upper), get_box_vertex(j, upper))
# edges along z: 0-4, 1-5, 2-6, 3-7
edge_id -= 8
i = edge_id
j = i + 4
return wp.spatial_vector(get_box_vertex(i, upper), get_box_vertex(j, upper))
@wp.func
def get_plane_edge(edge_id: int, plane_width: float, plane_length: float):
# get the edge of the plane given its ID (0-3)
p0x = (2.0 * float(edge_id % 2) - 1.0) * plane_width
p0z = (2.0 * float(edge_id // 2) - 1.0) * plane_length
if edge_id == 0 or edge_id == 3:
p1x = p0x
p1z = -p0z
else:
p1x = -p0x
p1z = p0z
return wp.spatial_vector(wp.vec3(p0x, 0.0, p0z), wp.vec3(p1x, 0.0, p1z))
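# The closest_edge_coordinate_* helpers below minimize a shape's SDF along the segment
# from edge_a to edge_b using a fixed number of golden-section iterations and return
# the parameter in [0, 1] of the approximately closest point, which is used when
# generating edge contacts.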
@wp.func
def closest_edge_coordinate_box(upper: wp.vec3, edge_a: wp.vec3, edge_b: wp.vec3, max_iter: int):
# find point on edge closest to box, return its barycentric edge coordinate
# Golden-section search
a = float(0.0)
b = float(1.0)
h = b - a
invphi = 0.61803398875 # 1 / phi
invphi2 = 0.38196601125 # 1 / phi^2
c = a + invphi2 * h
d = a + invphi * h
query = (1.0 - c) * edge_a + c * edge_b
yc = box_sdf(upper, query)
query = (1.0 - d) * edge_a + d * edge_b
yd = box_sdf(upper, query)
for _k in range(max_iter):
if yc < yd: # yc > yd to find the maximum
b = d
d = c
yd = yc
h = invphi * h
c = a + invphi2 * h
query = (1.0 - c) * edge_a + c * edge_b
yc = box_sdf(upper, query)
else:
a = c
c = d
yc = yd
h = invphi * h
d = a + invphi * h
query = (1.0 - d) * edge_a + d * edge_b
yd = box_sdf(upper, query)
if yc < yd:
return 0.5 * (a + d)
return 0.5 * (c + b)
@wp.func
def closest_edge_coordinate_plane(
plane_width: float,
plane_length: float,
edge_a: wp.vec3,
edge_b: wp.vec3,
max_iter: int,
):
# find point on edge closest to plane, return its barycentric edge coordinate
# Golden-section search
a = float(0.0)
b = float(1.0)
h = b - a
invphi = 0.61803398875 # 1 / phi
invphi2 = 0.38196601125 # 1 / phi^2
c = a + invphi2 * h
d = a + invphi * h
query = (1.0 - c) * edge_a + c * edge_b
yc = plane_sdf(plane_width, plane_length, query)
query = (1.0 - d) * edge_a + d * edge_b
yd = plane_sdf(plane_width, plane_length, query)
for _k in range(max_iter):
if yc < yd: # yc > yd to find the maximum
b = d
d = c
yd = yc
h = invphi * h
c = a + invphi2 * h
query = (1.0 - c) * edge_a + c * edge_b
yc = plane_sdf(plane_width, plane_length, query)
else:
a = c
c = d
yc = yd
h = invphi * h
d = a + invphi * h
query = (1.0 - d) * edge_a + d * edge_b
yd = plane_sdf(plane_width, plane_length, query)
if yc < yd:
return 0.5 * (a + d)
return 0.5 * (c + b)
@wp.func
def closest_edge_coordinate_capsule(radius: float, half_height: float, edge_a: wp.vec3, edge_b: wp.vec3, max_iter: int):
# find point on edge closest to capsule, return its barycentric edge coordinate
# Golden-section search
a = float(0.0)
b = float(1.0)
h = b - a
invphi = 0.61803398875 # 1 / phi
invphi2 = 0.38196601125 # 1 / phi^2
c = a + invphi2 * h
d = a + invphi * h
query = (1.0 - c) * edge_a + c * edge_b
yc = capsule_sdf(radius, half_height, query)
query = (1.0 - d) * edge_a + d * edge_b
yd = capsule_sdf(radius, half_height, query)
for _k in range(max_iter):
if yc < yd: # yc > yd to find the maximum
b = d
d = c
yd = yc
h = invphi * h
c = a + invphi2 * h
query = (1.0 - c) * edge_a + c * edge_b
yc = capsule_sdf(radius, half_height, query)
else:
a = c
c = d
yc = yd
h = invphi * h
d = a + invphi * h
query = (1.0 - d) * edge_a + d * edge_b
yd = capsule_sdf(radius, half_height, query)
if yc < yd:
return 0.5 * (a + d)
return 0.5 * (c + b)
@wp.func
def mesh_sdf(mesh: wp.uint64, point: wp.vec3, max_dist: float):
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
res = wp.mesh_query_point_sign_normal(mesh, point, max_dist, sign, face_index, face_u, face_v)
if res:
closest = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
return wp.length(point - closest) * sign
return max_dist
@wp.func
def closest_point_mesh(mesh: wp.uint64, point: wp.vec3, max_dist: float):
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
res = wp.mesh_query_point_sign_normal(mesh, point, max_dist, sign, face_index, face_u, face_v)
if res:
return wp.mesh_eval_position(mesh, face_index, face_u, face_v)
# return arbitrary point from mesh
return wp.mesh_eval_position(mesh, 0, 0.0, 0.0)
@wp.func
def closest_edge_coordinate_mesh(mesh: wp.uint64, edge_a: wp.vec3, edge_b: wp.vec3, max_iter: int, max_dist: float):
# find point on edge closest to mesh, return its barycentric edge coordinate
# Golden-section search
a = float(0.0)
b = float(1.0)
h = b - a
invphi = 0.61803398875 # 1 / phi
invphi2 = 0.38196601125 # 1 / phi^2
c = a + invphi2 * h
d = a + invphi * h
query = (1.0 - c) * edge_a + c * edge_b
yc = mesh_sdf(mesh, query, max_dist)
query = (1.0 - d) * edge_a + d * edge_b
yd = mesh_sdf(mesh, query, max_dist)
for _k in range(max_iter):
if yc < yd: # yc > yd to find the maximum
b = d
d = c
yd = yc
h = invphi * h
c = a + invphi2 * h
query = (1.0 - c) * edge_a + c * edge_b
yc = mesh_sdf(mesh, query, max_dist)
else:
a = c
c = d
yc = yd
h = invphi * h
d = a + invphi * h
query = (1.0 - d) * edge_a + d * edge_b
yd = mesh_sdf(mesh, query, max_dist)
if yc < yd:
return 0.5 * (a + d)
return 0.5 * (c + b)
@wp.func
def volume_grad(volume: wp.uint64, p: wp.vec3):
eps = 0.05 # TODO make this a parameter
q = wp.volume_world_to_index(volume, p)
# compute gradient of the SDF using finite differences
dx = wp.volume_sample_f(volume, q + wp.vec3(eps, 0.0, 0.0), wp.Volume.LINEAR) - wp.volume_sample_f(
volume, q - wp.vec3(eps, 0.0, 0.0), wp.Volume.LINEAR
)
dy = wp.volume_sample_f(volume, q + wp.vec3(0.0, eps, 0.0), wp.Volume.LINEAR) - wp.volume_sample_f(
volume, q - wp.vec3(0.0, eps, 0.0), wp.Volume.LINEAR
)
dz = wp.volume_sample_f(volume, q + wp.vec3(0.0, 0.0, eps), wp.Volume.LINEAR) - wp.volume_sample_f(
volume, q - wp.vec3(0.0, 0.0, eps), wp.Volume.LINEAR
)
return wp.normalize(wp.vec3(dx, dy, dz))
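# counter_increment atomically reserves the next slot of an output buffer and records
# the slot assigned to each thread in `tids`; the @wp.func_replay override below
# returns the recorded slot when the kernel is replayed for the backward pass, so
# contact indices stay consistent with the forward pass.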
@wp.func
def counter_increment(counter: wp.array(dtype=int), counter_index: int, tids: wp.array(dtype=int), tid: int):
# increment counter, remember which thread received which counter value
next_count = wp.atomic_add(counter, counter_index, 1)
tids[tid] = next_count
return next_count
@wp.func_replay(counter_increment)
def replay_counter_increment(counter: wp.array(dtype=int), counter_index: int, tids: wp.array(dtype=int), tid: int):
return tids[tid]
@wp.func
def limited_counter_increment(
counter: wp.array(dtype=int), counter_index: int, tids: wp.array(dtype=int), tid: int, index_limit: int
):
    # increment the counter only if it is smaller than index_limit; remember which thread received which counter value
next_count = wp.atomic_add(counter, counter_index, 1)
if next_count < index_limit or index_limit < 0:
tids[tid] = next_count
return next_count
tids[tid] = -1
return -1
@wp.func_replay(limited_counter_increment)
def replay_limited_counter_increment(
counter: wp.array(dtype=int), counter_index: int, tids: wp.array(dtype=int), tid: int, index_limit: int
):
return tids[tid]
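# Generates particle-vs-shape ("soft") contacts: one thread per (particle, shape) pair
# evaluates the shape's SDF at the particle position in shape-local space and, if the
# signed distance is below margin + particle radius, appends a contact with its
# body-space point, velocity and world-space normal.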
@wp.kernel
def create_soft_contacts(
particle_x: wp.array(dtype=wp.vec3),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
body_X_wb: wp.array(dtype=wp.transform),
shape_X_bs: wp.array(dtype=wp.transform),
shape_body: wp.array(dtype=int),
geo: ModelShapeGeometry,
margin: float,
soft_contact_max: int,
shape_count: int,
# outputs
soft_contact_count: wp.array(dtype=int),
soft_contact_particle: wp.array(dtype=int),
soft_contact_shape: wp.array(dtype=int),
soft_contact_body_pos: wp.array(dtype=wp.vec3),
soft_contact_body_vel: wp.array(dtype=wp.vec3),
soft_contact_normal: wp.array(dtype=wp.vec3),
soft_contact_tids: wp.array(dtype=int),
):
tid = wp.tid()
particle_index, shape_index = tid // shape_count, tid % shape_count
if (particle_flags[particle_index] & PARTICLE_FLAG_ACTIVE) == 0:
return
rigid_index = shape_body[shape_index]
px = particle_x[particle_index]
radius = particle_radius[particle_index]
X_wb = wp.transform_identity()
if rigid_index >= 0:
X_wb = body_X_wb[rigid_index]
X_bs = shape_X_bs[shape_index]
X_ws = wp.transform_multiply(X_wb, X_bs)
X_sw = wp.transform_inverse(X_ws)
# transform particle position to shape local space
x_local = wp.transform_point(X_sw, px)
# geo description
geo_type = geo.type[shape_index]
geo_scale = geo.scale[shape_index]
# evaluate shape sdf
d = 1.0e6
n = wp.vec3()
v = wp.vec3()
if geo_type == wp.sim.GEO_SPHERE:
d = sphere_sdf(wp.vec3(), geo_scale[0], x_local)
n = sphere_sdf_grad(wp.vec3(), geo_scale[0], x_local)
if geo_type == wp.sim.GEO_BOX:
d = box_sdf(geo_scale, x_local)
n = box_sdf_grad(geo_scale, x_local)
if geo_type == wp.sim.GEO_CAPSULE:
d = capsule_sdf(geo_scale[0], geo_scale[1], x_local)
n = capsule_sdf_grad(geo_scale[0], geo_scale[1], x_local)
if geo_type == wp.sim.GEO_CYLINDER:
d = cylinder_sdf(geo_scale[0], geo_scale[1], x_local)
n = cylinder_sdf_grad(geo_scale[0], geo_scale[1], x_local)
if geo_type == wp.sim.GEO_CONE:
d = cone_sdf(geo_scale[0], geo_scale[1], x_local)
n = cone_sdf_grad(geo_scale[0], geo_scale[1], x_local)
if geo_type == wp.sim.GEO_MESH:
mesh = geo.source[shape_index]
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
min_scale = wp.min(geo_scale)
if wp.mesh_query_point_sign_normal(
mesh, wp.cw_div(x_local, geo_scale), margin + radius / min_scale, sign, face_index, face_u, face_v
):
shape_p = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
shape_v = wp.mesh_eval_velocity(mesh, face_index, face_u, face_v)
shape_p = wp.cw_mul(shape_p, geo_scale)
shape_v = wp.cw_mul(shape_v, geo_scale)
delta = x_local - shape_p
d = wp.length(delta) * sign
n = wp.normalize(delta) * sign
v = shape_v
if geo_type == wp.sim.GEO_SDF:
volume = geo.source[shape_index]
xpred_local = wp.volume_world_to_index(volume, wp.cw_div(x_local, geo_scale))
nn = wp.vec3(0.0, 0.0, 0.0)
d = wp.volume_sample_grad_f(volume, xpred_local, wp.Volume.LINEAR, nn)
n = wp.normalize(nn)
if geo_type == wp.sim.GEO_PLANE:
d = plane_sdf(geo_scale[0], geo_scale[1], x_local)
n = wp.vec3(0.0, 1.0, 0.0)
if d < margin + radius:
index = counter_increment(soft_contact_count, 0, soft_contact_tids, tid)
if index < soft_contact_max:
# compute contact point in body local space
body_pos = wp.transform_point(X_bs, x_local - n * d)
body_vel = wp.transform_vector(X_bs, v)
world_normal = wp.transform_vector(X_ws, n)
soft_contact_shape[index] = shape_index
soft_contact_body_pos[index] = body_pos
soft_contact_body_vel[index] = body_vel
soft_contact_particle[index] = particle_index
soft_contact_normal[index] = world_normal
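# Contact sizing pass: for every candidate shape pair, accumulates an upper bound on
# the number of potential contact points into contact_count[0] and the number that
# will actually be stored (after applying mesh_contact_max) into contact_count[1].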
@wp.kernel(enable_backward=False)
def count_contact_points(
contact_pairs: wp.array(dtype=int, ndim=2),
geo: ModelShapeGeometry,
mesh_contact_max: int,
# outputs
contact_count: wp.array(dtype=int),
):
tid = wp.tid()
shape_a = contact_pairs[tid, 0]
shape_b = contact_pairs[tid, 1]
if shape_b == -1:
actual_shape_a = shape_a
actual_type_a = geo.type[shape_a]
# ground plane
actual_type_b = wp.sim.GEO_PLANE
actual_shape_b = -1
else:
type_a = geo.type[shape_a]
type_b = geo.type[shape_b]
# unique ordering of shape pairs
if type_a < type_b:
actual_shape_a = shape_a
actual_shape_b = shape_b
actual_type_a = type_a
actual_type_b = type_b
else:
actual_shape_a = shape_b
actual_shape_b = shape_a
actual_type_a = type_b
actual_type_b = type_a
# determine how many contact points need to be evaluated
num_contacts = 0
num_actual_contacts = 0
if actual_type_a == wp.sim.GEO_SPHERE:
num_contacts = 1
num_actual_contacts = 1
elif actual_type_a == wp.sim.GEO_CAPSULE:
if actual_type_b == wp.sim.GEO_PLANE:
if geo.scale[actual_shape_b][0] == 0.0 and geo.scale[actual_shape_b][1] == 0.0:
num_contacts = 2 # vertex-based collision for infinite plane
num_actual_contacts = 2
else:
num_contacts = 2 + 4 # vertex-based collision + plane edges
num_actual_contacts = 2 + 4
elif actual_type_b == wp.sim.GEO_MESH:
num_contacts_a = 2
mesh_b = wp.mesh_get(geo.source[actual_shape_b])
num_contacts_b = mesh_b.points.shape[0]
num_contacts = num_contacts_a + num_contacts_b
if mesh_contact_max > 0:
num_contacts_b = wp.min(mesh_contact_max, num_contacts_b)
num_actual_contacts = num_contacts_a + num_contacts_b
else:
num_contacts = 2
num_actual_contacts = 2
elif actual_type_a == wp.sim.GEO_BOX:
if actual_type_b == wp.sim.GEO_BOX:
num_contacts = 24
num_actual_contacts = 24
elif actual_type_b == wp.sim.GEO_MESH:
num_contacts_a = 8
mesh_b = wp.mesh_get(geo.source[actual_shape_b])
num_contacts_b = mesh_b.points.shape[0]
num_contacts = num_contacts_a + num_contacts_b
if mesh_contact_max > 0:
num_contacts_b = wp.min(mesh_contact_max, num_contacts_b)
num_actual_contacts = num_contacts_a + num_contacts_b
elif actual_type_b == wp.sim.GEO_PLANE:
if geo.scale[actual_shape_b][0] == 0.0 and geo.scale[actual_shape_b][1] == 0.0:
num_contacts = 8 # vertex-based collision
num_actual_contacts = 8
else:
num_contacts = 8 + 4 # vertex-based collision + plane edges
num_actual_contacts = 8 + 4
else:
            num_contacts = 8
            num_actual_contacts = 8
elif actual_type_a == wp.sim.GEO_MESH:
mesh_a = wp.mesh_get(geo.source[actual_shape_a])
num_contacts_a = mesh_a.points.shape[0]
if mesh_contact_max > 0:
num_contacts_a = wp.min(mesh_contact_max, num_contacts_a)
if actual_type_b == wp.sim.GEO_MESH:
mesh_b = wp.mesh_get(geo.source[actual_shape_b])
num_contacts_b = mesh_b.points.shape[0]
num_contacts = num_contacts_a + num_contacts_b
if mesh_contact_max > 0:
num_contacts_b = wp.min(mesh_contact_max, num_contacts_b)
else:
num_contacts_b = 0
num_contacts = num_contacts_a + num_contacts_b
num_actual_contacts = num_contacts_a + num_contacts_b
elif actual_type_a == wp.sim.GEO_PLANE:
return # no plane-plane contacts
else:
wp.printf(
"count_contact_points: unsupported geometry type combination %d and %d\n", actual_type_a, actual_type_b
)
wp.atomic_add(contact_count, 0, num_contacts)
wp.atomic_add(contact_count, 1, num_actual_contacts)
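# count_contact_points accumulates two totals: element 0 is the number of contact points the broad
# phase will request, element 1 the number that remains after clamping mesh contacts to
# mesh_contact_max. A hedged host-side sketch of reading both values back (the helper name and the
# temporary counter array are illustrative, not part of the library API):
def _example_required_contact_counts(model):
    counts = wp.zeros(2, dtype=wp.int32, device=model.device)
    wp.launch(
        kernel=count_contact_points,
        dim=model.shape_contact_pair_count,
        inputs=[model.shape_contact_pairs, model.shape_geo, model.rigid_mesh_contact_max],
        outputs=[counts],
        device=model.device,
    )
    potential, actual = counts.numpy()
    return int(potential), int(actual)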
@wp.kernel(enable_backward=False)
def broadphase_collision_pairs(
contact_pairs: wp.array(dtype=int, ndim=2),
body_q: wp.array(dtype=wp.transform),
shape_X_bs: wp.array(dtype=wp.transform),
shape_body: wp.array(dtype=int),
body_mass: wp.array(dtype=float),
num_shapes: int,
geo: ModelShapeGeometry,
collision_radius: wp.array(dtype=float),
rigid_contact_max: int,
rigid_contact_margin: float,
mesh_contact_max: int,
iterate_mesh_vertices: bool,
# outputs
contact_count: wp.array(dtype=int),
contact_shape0: wp.array(dtype=int),
contact_shape1: wp.array(dtype=int),
contact_point_id: wp.array(dtype=int),
contact_point_limit: wp.array(dtype=int),
):
tid = wp.tid()
shape_a = contact_pairs[tid, 0]
shape_b = contact_pairs[tid, 1]
mass_a = 0.0
mass_b = 0.0
rigid_a = shape_body[shape_a]
if rigid_a == -1:
X_ws_a = shape_X_bs[shape_a]
else:
X_ws_a = wp.transform_multiply(body_q[rigid_a], shape_X_bs[shape_a])
mass_a = body_mass[rigid_a]
rigid_b = shape_body[shape_b]
if rigid_b == -1:
X_ws_b = shape_X_bs[shape_b]
else:
X_ws_b = wp.transform_multiply(body_q[rigid_b], shape_X_bs[shape_b])
mass_b = body_mass[rigid_b]
if mass_a == 0.0 and mass_b == 0.0:
# skip if both bodies are static
return
type_a = geo.type[shape_a]
type_b = geo.type[shape_b]
# unique ordering of shape pairs
if type_a < type_b:
actual_shape_a = shape_a
actual_shape_b = shape_b
actual_type_a = type_a
actual_type_b = type_b
actual_X_ws_a = X_ws_a
actual_X_ws_b = X_ws_b
else:
actual_shape_a = shape_b
actual_shape_b = shape_a
actual_type_a = type_b
actual_type_b = type_a
actual_X_ws_a = X_ws_b
actual_X_ws_b = X_ws_a
p_a = wp.transform_get_translation(actual_X_ws_a)
if actual_type_b == wp.sim.GEO_PLANE:
if actual_type_a == wp.sim.GEO_PLANE:
return
query_b = wp.transform_point(wp.transform_inverse(actual_X_ws_b), p_a)
scale = geo.scale[actual_shape_b]
closest = closest_point_plane(scale[0], scale[1], query_b)
d = wp.length(query_b - closest)
r_a = collision_radius[actual_shape_a]
if d > r_a + rigid_contact_margin:
return
else:
p_b = wp.transform_get_translation(actual_X_ws_b)
d = wp.length(p_a - p_b) * 0.5 - 0.1
r_a = collision_radius[actual_shape_a]
r_b = collision_radius[actual_shape_b]
if d > r_a + r_b + rigid_contact_margin:
return
pair_index_ab = actual_shape_a * num_shapes + actual_shape_b
pair_index_ba = actual_shape_b * num_shapes + actual_shape_a
# determine how many contact points need to be evaluated
num_contacts = 0
if actual_type_a == wp.sim.GEO_SPHERE:
num_contacts = 1
elif actual_type_a == wp.sim.GEO_CAPSULE:
if actual_type_b == wp.sim.GEO_PLANE:
if geo.scale[actual_shape_b][0] == 0.0 and geo.scale[actual_shape_b][1] == 0.0:
num_contacts = 2 # vertex-based collision for infinite plane
else:
num_contacts = 2 + 4 # vertex-based collision + plane edges
elif actual_type_b == wp.sim.GEO_MESH:
num_contacts_a = 2
mesh_b = wp.mesh_get(geo.source[actual_shape_b])
if iterate_mesh_vertices:
num_contacts_b = mesh_b.points.shape[0]
else:
num_contacts_b = 0
num_contacts = num_contacts_a + num_contacts_b
index = wp.atomic_add(contact_count, 0, num_contacts)
if index + num_contacts - 1 >= rigid_contact_max:
print("Number of rigid contacts exceeded limit. Increase Model.rigid_contact_max.")
return
# allocate contact points from capsule A against mesh B
for i in range(num_contacts_a):
contact_shape0[index + i] = actual_shape_a
contact_shape1[index + i] = actual_shape_b
contact_point_id[index + i] = i
# allocate contact points from mesh B against capsule A
for i in range(num_contacts_b):
contact_shape0[index + num_contacts_a + i] = actual_shape_b
contact_shape1[index + num_contacts_a + i] = actual_shape_a
contact_point_id[index + num_contacts_a + i] = i
contact_point_limit[pair_index_ab] = 2
if mesh_contact_max > 0:
num_contacts_b = wp.min(mesh_contact_max, num_contacts_b)
contact_point_limit[pair_index_ba] = num_contacts_b
return
else:
num_contacts = 2
elif actual_type_a == wp.sim.GEO_BOX:
if actual_type_b == wp.sim.GEO_BOX:
index = wp.atomic_add(contact_count, 0, 24)
if index + 23 >= rigid_contact_max:
print("Number of rigid contacts exceeded limit. Increase Model.rigid_contact_max.")
return
# allocate contact points from box A against B
for i in range(12): # 12 edges
contact_shape0[index + i] = shape_a
contact_shape1[index + i] = shape_b
contact_point_id[index + i] = i
contact_point_limit[pair_index_ab] = 12
# allocate contact points from box B against A
for i in range(12):
contact_shape0[index + 12 + i] = shape_b
contact_shape1[index + 12 + i] = shape_a
contact_point_id[index + 12 + i] = i
contact_point_limit[pair_index_ba] = 12
return
elif actual_type_b == wp.sim.GEO_MESH:
num_contacts_a = 8
mesh_b = wp.mesh_get(geo.source[actual_shape_b])
if iterate_mesh_vertices:
num_contacts_b = mesh_b.points.shape[0]
else:
num_contacts_b = 0
num_contacts = num_contacts_a + num_contacts_b
index = wp.atomic_add(contact_count, 0, num_contacts)
if index + num_contacts - 1 >= rigid_contact_max:
print("Number of rigid contacts exceeded limit. Increase Model.rigid_contact_max.")
return
# allocate contact points from box A against mesh B
for i in range(num_contacts_a):
contact_shape0[index + i] = actual_shape_a
contact_shape1[index + i] = actual_shape_b
contact_point_id[index + i] = i
# allocate contact points from mesh B against box A
for i in range(num_contacts_b):
contact_shape0[index + num_contacts_a + i] = actual_shape_b
contact_shape1[index + num_contacts_a + i] = actual_shape_a
contact_point_id[index + num_contacts_a + i] = i
contact_point_limit[pair_index_ab] = num_contacts_a
if mesh_contact_max > 0:
num_contacts_b = wp.min(mesh_contact_max, num_contacts_b)
contact_point_limit[pair_index_ba] = num_contacts_b
return
elif actual_type_b == wp.sim.GEO_PLANE:
if geo.scale[actual_shape_b][0] == 0.0 and geo.scale[actual_shape_b][1] == 0.0:
num_contacts = 8 # vertex-based collision
else:
num_contacts = 8 + 4 # vertex-based collision + plane edges
else:
num_contacts = 8
elif actual_type_a == wp.sim.GEO_MESH:
mesh_a = wp.mesh_get(geo.source[actual_shape_a])
num_contacts_a = mesh_a.points.shape[0]
num_contacts_b = 0
if actual_type_b == wp.sim.GEO_MESH:
mesh_b = wp.mesh_get(geo.source[actual_shape_b])
num_contacts_b = mesh_b.points.shape[0]
elif actual_type_b != wp.sim.GEO_PLANE:
print("broadphase_collision_pairs: unsupported geometry type for mesh collision")
return
num_contacts = num_contacts_a + num_contacts_b
if num_contacts > 0:
index = wp.atomic_add(contact_count, 0, num_contacts)
if index + num_contacts - 1 >= rigid_contact_max:
print("Mesh contact: Number of rigid contacts exceeded limit. Increase Model.rigid_contact_max.")
return
# allocate contact points from mesh A against B
for i in range(num_contacts_a):
contact_shape0[index + i] = actual_shape_a
contact_shape1[index + i] = actual_shape_b
contact_point_id[index + i] = i
# allocate contact points from mesh B against A
for i in range(num_contacts_b):
contact_shape0[index + num_contacts_a + i] = actual_shape_b
contact_shape1[index + num_contacts_a + i] = actual_shape_a
contact_point_id[index + num_contacts_a + i] = i
if mesh_contact_max > 0:
num_contacts_a = wp.min(mesh_contact_max, num_contacts_a)
num_contacts_b = wp.min(mesh_contact_max, num_contacts_b)
contact_point_limit[pair_index_ab] = num_contacts_a
contact_point_limit[pair_index_ba] = num_contacts_b
return
elif actual_type_a == wp.sim.GEO_PLANE:
return # no plane-plane contacts
else:
print("broadphase_collision_pairs: unsupported geometry type")
if num_contacts > 0:
index = wp.atomic_add(contact_count, 0, num_contacts)
if index + num_contacts - 1 >= rigid_contact_max:
print("Number of rigid contacts exceeded limit. Increase Model.rigid_contact_max.")
return
# allocate contact points
for i in range(num_contacts):
cp_index = index + i
contact_shape0[cp_index] = actual_shape_a
contact_shape1[cp_index] = actual_shape_b
contact_point_id[cp_index] = i
contact_point_limit[pair_index_ab] = num_contacts
contact_point_limit[pair_index_ba] = 0
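# broadphase_collision_pairs addresses per-pair outputs through a flattened index
# pair_index = shape_a * num_shapes + shape_b, so contact_point_limit is expected to hold
# num_shapes * num_shapes entries. A minimal host-side illustration of the same mapping
# (the helper is illustrative only):
def _example_pair_index(shape_a: int, shape_b: int, num_shapes: int) -> int:
    assert 0 <= shape_a < num_shapes and 0 <= shape_b < num_shapes
    return shape_a * num_shapes + shape_b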
@wp.kernel
def handle_contact_pairs(
body_q: wp.array(dtype=wp.transform),
shape_X_bs: wp.array(dtype=wp.transform),
shape_body: wp.array(dtype=int),
geo: ModelShapeGeometry,
rigid_contact_margin: float,
contact_broad_shape0: wp.array(dtype=int),
contact_broad_shape1: wp.array(dtype=int),
num_shapes: int,
contact_point_id: wp.array(dtype=int),
contact_point_limit: wp.array(dtype=int),
edge_sdf_iter: int,
# outputs
contact_count: wp.array(dtype=int),
contact_shape0: wp.array(dtype=int),
contact_shape1: wp.array(dtype=int),
contact_point0: wp.array(dtype=wp.vec3),
contact_point1: wp.array(dtype=wp.vec3),
contact_offset0: wp.array(dtype=wp.vec3),
contact_offset1: wp.array(dtype=wp.vec3),
contact_normal: wp.array(dtype=wp.vec3),
contact_thickness: wp.array(dtype=float),
contact_pairwise_counter: wp.array(dtype=int),
contact_tids: wp.array(dtype=int),
):
tid = wp.tid()
shape_a = contact_broad_shape0[tid]
shape_b = contact_broad_shape1[tid]
if shape_a == shape_b:
return
point_id = contact_point_id[tid]
pair_index = shape_a * num_shapes + shape_b
contact_limit = contact_point_limit[pair_index]
if contact_pairwise_counter[pair_index] >= contact_limit:
# reached limit of contact points per contact pair
return
rigid_a = shape_body[shape_a]
X_wb_a = wp.transform_identity()
if rigid_a >= 0:
X_wb_a = body_q[rigid_a]
X_bs_a = shape_X_bs[shape_a]
X_ws_a = wp.transform_multiply(X_wb_a, X_bs_a)
X_sw_a = wp.transform_inverse(X_ws_a)
X_bw_a = wp.transform_inverse(X_wb_a)
geo_type_a = geo.type[shape_a]
geo_scale_a = geo.scale[shape_a]
min_scale_a = min(geo_scale_a)
thickness_a = geo.thickness[shape_a]
# is_solid_a = geo.is_solid[shape_a]
rigid_b = shape_body[shape_b]
X_wb_b = wp.transform_identity()
if rigid_b >= 0:
X_wb_b = body_q[rigid_b]
X_bs_b = shape_X_bs[shape_b]
X_ws_b = wp.transform_multiply(X_wb_b, X_bs_b)
X_sw_b = wp.transform_inverse(X_ws_b)
X_bw_b = wp.transform_inverse(X_wb_b)
geo_type_b = geo.type[shape_b]
geo_scale_b = geo.scale[shape_b]
min_scale_b = min(geo_scale_b)
thickness_b = geo.thickness[shape_b]
# is_solid_b = geo.is_solid[shape_b]
distance = 1.0e6
u = float(0.0)
thickness = thickness_a + thickness_b
if geo_type_a == wp.sim.GEO_SPHERE:
p_a_world = wp.transform_get_translation(X_ws_a)
if geo_type_b == wp.sim.GEO_SPHERE:
p_b_world = wp.transform_get_translation(X_ws_b)
elif geo_type_b == wp.sim.GEO_BOX:
# contact point in frame of body B
p_a_body = wp.transform_point(X_sw_b, p_a_world)
p_b_body = closest_point_box(geo_scale_b, p_a_body)
p_b_world = wp.transform_point(X_ws_b, p_b_body)
elif geo_type_b == wp.sim.GEO_CAPSULE:
half_height_b = geo_scale_b[1]
# capsule B
A_b = wp.transform_point(X_ws_b, wp.vec3(0.0, half_height_b, 0.0))
B_b = wp.transform_point(X_ws_b, wp.vec3(0.0, -half_height_b, 0.0))
p_b_world = closest_point_line_segment(A_b, B_b, p_a_world)
elif geo_type_b == wp.sim.GEO_MESH:
mesh_b = geo.source[shape_b]
query_b_local = wp.transform_point(X_sw_b, p_a_world)
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
max_dist = (thickness + rigid_contact_margin) / min_scale_b
res = wp.mesh_query_point_sign_normal(
mesh_b, wp.cw_div(query_b_local, geo_scale_b), max_dist, sign, face_index, face_u, face_v
)
if res:
shape_p = wp.mesh_eval_position(mesh_b, face_index, face_u, face_v)
shape_p = wp.cw_mul(shape_p, geo_scale_b)
p_b_world = wp.transform_point(X_ws_b, shape_p)
else:
return
elif geo_type_b == wp.sim.GEO_PLANE:
p_b_body = closest_point_plane(geo_scale_b[0], geo_scale_b[1], wp.transform_point(X_sw_b, p_a_world))
p_b_world = wp.transform_point(X_ws_b, p_b_body)
else:
print("Unsupported geometry type in sphere collision handling")
print(geo_type_b)
return
diff = p_a_world - p_b_world
normal = wp.normalize(diff)
distance = wp.dot(diff, normal)
elif geo_type_a == wp.sim.GEO_BOX and geo_type_b == wp.sim.GEO_BOX:
# edge-based box contact
edge = get_box_edge(point_id, geo_scale_a)
edge0_world = wp.transform_point(X_ws_a, wp.spatial_top(edge))
edge1_world = wp.transform_point(X_ws_a, wp.spatial_bottom(edge))
edge0_b = wp.transform_point(X_sw_b, edge0_world)
edge1_b = wp.transform_point(X_sw_b, edge1_world)
max_iter = edge_sdf_iter
u = closest_edge_coordinate_box(geo_scale_b, edge0_b, edge1_b, max_iter)
p_a_world = (1.0 - u) * edge0_world + u * edge1_world
# find closest point + contact normal on box B
query_b = wp.transform_point(X_sw_b, p_a_world)
p_b_body = closest_point_box(geo_scale_b, query_b)
p_b_world = wp.transform_point(X_ws_b, p_b_body)
diff = p_a_world - p_b_world
# use center of box A to query normal to make sure we are not inside B
query_b = wp.transform_point(X_sw_b, wp.transform_get_translation(X_ws_a))
normal = wp.transform_vector(X_ws_b, box_sdf_grad(geo_scale_b, query_b))
distance = wp.dot(diff, normal)
elif geo_type_a == wp.sim.GEO_BOX and geo_type_b == wp.sim.GEO_CAPSULE:
half_height_b = geo_scale_b[1]
# capsule B
        # depending on the point id, we query either the upper half (0 to h) or the lower half (-h to 0) of the capsule axis
e0 = wp.vec3(0.0, -half_height_b * float(point_id % 2), 0.0)
e1 = wp.vec3(0.0, half_height_b * float((point_id + 1) % 2), 0.0)
edge0_world = wp.transform_point(X_ws_b, e0)
edge1_world = wp.transform_point(X_ws_b, e1)
edge0_a = wp.transform_point(X_sw_a, edge0_world)
edge1_a = wp.transform_point(X_sw_a, edge1_world)
max_iter = edge_sdf_iter
u = closest_edge_coordinate_box(geo_scale_a, edge0_a, edge1_a, max_iter)
p_b_world = (1.0 - u) * edge0_world + u * edge1_world
# find closest point + contact normal on box A
query_a = wp.transform_point(X_sw_a, p_b_world)
p_a_body = closest_point_box(geo_scale_a, query_a)
p_a_world = wp.transform_point(X_ws_a, p_a_body)
diff = p_a_world - p_b_world
# the contact point inside the capsule should already be outside the box
normal = -wp.transform_vector(X_ws_a, box_sdf_grad(geo_scale_a, query_a))
distance = wp.dot(diff, normal)
elif geo_type_a == wp.sim.GEO_BOX and geo_type_b == wp.sim.GEO_PLANE:
plane_width = geo_scale_b[0]
plane_length = geo_scale_b[1]
if point_id < 8:
# vertex-based contact
p_a_body = get_box_vertex(point_id, geo_scale_a)
p_a_world = wp.transform_point(X_ws_a, p_a_body)
query_b = wp.transform_point(X_sw_b, p_a_world)
p_b_body = closest_point_plane(plane_width, plane_length, query_b)
p_b_world = wp.transform_point(X_ws_b, p_b_body)
diff = p_a_world - p_b_world
normal = wp.transform_vector(X_ws_b, wp.vec3(0.0, 1.0, 0.0))
if plane_width > 0.0 and plane_length > 0.0:
if wp.abs(query_b[0]) > plane_width or wp.abs(query_b[2]) > plane_length:
# skip, we will evaluate the plane edge contact with the box later
return
# check whether the COM is above the plane
# sign = wp.sign(wp.dot(wp.transform_get_translation(X_ws_a) - p_b_world, normal))
# if sign < 0.0:
# # the entire box is most likely below the plane
# return
# the contact point is within plane boundaries
distance = wp.dot(diff, normal)
else:
# contact between box A and edges of finite plane B
edge = get_plane_edge(point_id - 8, plane_width, plane_length)
edge0_world = wp.transform_point(X_ws_b, wp.spatial_top(edge))
edge1_world = wp.transform_point(X_ws_b, wp.spatial_bottom(edge))
edge0_a = wp.transform_point(X_sw_a, edge0_world)
edge1_a = wp.transform_point(X_sw_a, edge1_world)
max_iter = edge_sdf_iter
u = closest_edge_coordinate_box(geo_scale_a, edge0_a, edge1_a, max_iter)
p_b_world = (1.0 - u) * edge0_world + u * edge1_world
# find closest point + contact normal on box A
query_a = wp.transform_point(X_sw_a, p_b_world)
p_a_body = closest_point_box(geo_scale_a, query_a)
p_a_world = wp.transform_point(X_ws_a, p_a_body)
query_b = wp.transform_point(X_sw_b, p_a_world)
if wp.abs(query_b[0]) > plane_width or wp.abs(query_b[2]) > plane_length:
# ensure that the closest point is actually inside the plane
return
diff = p_a_world - p_b_world
com_a = wp.transform_get_translation(X_ws_a)
query_b = wp.transform_point(X_sw_b, com_a)
if wp.abs(query_b[0]) > plane_width or wp.abs(query_b[2]) > plane_length:
# the COM is outside the plane
normal = wp.normalize(com_a - p_b_world)
else:
normal = wp.transform_vector(X_ws_b, wp.vec3(0.0, 1.0, 0.0))
distance = wp.dot(diff, normal)
elif geo_type_a == wp.sim.GEO_CAPSULE and geo_type_b == wp.sim.GEO_CAPSULE:
# find closest edge coordinate to capsule SDF B
half_height_a = geo_scale_a[1]
half_height_b = geo_scale_b[1]
# edge from capsule A
        # depending on the point id, we query either the lower half (-h to 0) or the upper half (0 to h) of the capsule axis
e0 = wp.vec3(0.0, half_height_a * float(point_id % 2), 0.0)
e1 = wp.vec3(0.0, -half_height_a * float((point_id + 1) % 2), 0.0)
edge0_world = wp.transform_point(X_ws_a, e0)
edge1_world = wp.transform_point(X_ws_a, e1)
edge0_b = wp.transform_point(X_sw_b, edge0_world)
edge1_b = wp.transform_point(X_sw_b, edge1_world)
max_iter = edge_sdf_iter
u = closest_edge_coordinate_capsule(geo_scale_b[0], geo_scale_b[1], edge0_b, edge1_b, max_iter)
p_a_world = (1.0 - u) * edge0_world + u * edge1_world
p0_b_world = wp.transform_point(X_ws_b, wp.vec3(0.0, half_height_b, 0.0))
p1_b_world = wp.transform_point(X_ws_b, wp.vec3(0.0, -half_height_b, 0.0))
p_b_world = closest_point_line_segment(p0_b_world, p1_b_world, p_a_world)
diff = p_a_world - p_b_world
normal = wp.normalize(diff)
distance = wp.dot(diff, normal)
elif geo_type_a == wp.sim.GEO_CAPSULE and geo_type_b == wp.sim.GEO_MESH:
# find closest edge coordinate to mesh SDF B
half_height_a = geo_scale_a[1]
# edge from capsule A
# depending on point id, we query an edge from -h to 0 or 0 to h
e0 = wp.vec3(0.0, -half_height_a * float(point_id % 2), 0.0)
e1 = wp.vec3(0.0, half_height_a * float((point_id + 1) % 2), 0.0)
edge0_world = wp.transform_point(X_ws_a, e0)
edge1_world = wp.transform_point(X_ws_a, e1)
edge0_b = wp.transform_point(X_sw_b, edge0_world)
edge1_b = wp.transform_point(X_sw_b, edge1_world)
max_iter = edge_sdf_iter
max_dist = (rigid_contact_margin + thickness) / min_scale_b
mesh_b = geo.source[shape_b]
u = closest_edge_coordinate_mesh(
mesh_b, wp.cw_div(edge0_b, geo_scale_b), wp.cw_div(edge1_b, geo_scale_b), max_iter, max_dist
)
p_a_world = (1.0 - u) * edge0_world + u * edge1_world
query_b_local = wp.transform_point(X_sw_b, p_a_world)
mesh_b = geo.source[shape_b]
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
res = wp.mesh_query_point_sign_normal(
mesh_b, wp.cw_div(query_b_local, geo_scale_b), max_dist, sign, face_index, face_u, face_v
)
if res:
shape_p = wp.mesh_eval_position(mesh_b, face_index, face_u, face_v)
shape_p = wp.cw_mul(shape_p, geo_scale_b)
p_b_world = wp.transform_point(X_ws_b, shape_p)
p_a_world = closest_point_line_segment(edge0_world, edge1_world, p_b_world)
# contact direction vector in world frame
diff = p_a_world - p_b_world
normal = wp.normalize(diff)
distance = wp.dot(diff, normal)
else:
return
elif geo_type_a == wp.sim.GEO_MESH and geo_type_b == wp.sim.GEO_CAPSULE:
# vertex-based contact
mesh = wp.mesh_get(geo.source[shape_a])
body_a_pos = wp.cw_mul(mesh.points[point_id], geo_scale_a)
p_a_world = wp.transform_point(X_ws_a, body_a_pos)
# find closest point + contact normal on capsule B
half_height_b = geo_scale_b[1]
A_b = wp.transform_point(X_ws_b, wp.vec3(0.0, half_height_b, 0.0))
B_b = wp.transform_point(X_ws_b, wp.vec3(0.0, -half_height_b, 0.0))
p_b_world = closest_point_line_segment(A_b, B_b, p_a_world)
diff = p_a_world - p_b_world
# this is more reliable in practice than using the SDF gradient
normal = wp.normalize(diff)
distance = wp.dot(diff, normal)
elif geo_type_a == wp.sim.GEO_CAPSULE and geo_type_b == wp.sim.GEO_PLANE:
plane_width = geo_scale_b[0]
plane_length = geo_scale_b[1]
if point_id < 2:
# vertex-based collision
half_height_a = geo_scale_a[1]
side = float(point_id) * 2.0 - 1.0
p_a_world = wp.transform_point(X_ws_a, wp.vec3(0.0, side * half_height_a, 0.0))
query_b = wp.transform_point(X_sw_b, p_a_world)
p_b_body = closest_point_plane(geo_scale_b[0], geo_scale_b[1], query_b)
p_b_world = wp.transform_point(X_ws_b, p_b_body)
diff = p_a_world - p_b_world
if geo_scale_b[0] > 0.0 and geo_scale_b[1] > 0.0:
normal = wp.normalize(diff)
else:
normal = wp.transform_vector(X_ws_b, wp.vec3(0.0, 1.0, 0.0))
distance = wp.dot(diff, normal)
else:
# contact between capsule A and edges of finite plane B
plane_width = geo_scale_b[0]
plane_length = geo_scale_b[1]
edge = get_plane_edge(point_id - 2, plane_width, plane_length)
edge0_world = wp.transform_point(X_ws_b, wp.spatial_top(edge))
edge1_world = wp.transform_point(X_ws_b, wp.spatial_bottom(edge))
edge0_a = wp.transform_point(X_sw_a, edge0_world)
edge1_a = wp.transform_point(X_sw_a, edge1_world)
max_iter = edge_sdf_iter
u = closest_edge_coordinate_capsule(geo_scale_a[0], geo_scale_a[1], edge0_a, edge1_a, max_iter)
p_b_world = (1.0 - u) * edge0_world + u * edge1_world
# find closest point + contact normal on capsule A
half_height_a = geo_scale_a[1]
p0_a_world = wp.transform_point(X_ws_a, wp.vec3(0.0, half_height_a, 0.0))
p1_a_world = wp.transform_point(X_ws_a, wp.vec3(0.0, -half_height_a, 0.0))
p_a_world = closest_point_line_segment(p0_a_world, p1_a_world, p_b_world)
diff = p_a_world - p_b_world
# normal = wp.transform_vector(X_ws_b, wp.vec3(0.0, 1.0, 0.0))
normal = wp.normalize(diff)
distance = wp.dot(diff, normal)
elif geo_type_a == wp.sim.GEO_MESH and geo_type_b == wp.sim.GEO_BOX:
# vertex-based contact
mesh = wp.mesh_get(geo.source[shape_a])
body_a_pos = wp.cw_mul(mesh.points[point_id], geo_scale_a)
p_a_world = wp.transform_point(X_ws_a, body_a_pos)
# find closest point + contact normal on box B
query_b = wp.transform_point(X_sw_b, p_a_world)
p_b_body = closest_point_box(geo_scale_b, query_b)
p_b_world = wp.transform_point(X_ws_b, p_b_body)
diff = p_a_world - p_b_world
# this is more reliable in practice than using the SDF gradient
normal = wp.normalize(diff)
if box_sdf(geo_scale_b, query_b) < 0.0:
normal = -normal
distance = wp.dot(diff, normal)
elif geo_type_a == wp.sim.GEO_BOX and geo_type_b == wp.sim.GEO_MESH:
# vertex-based contact
query_a = get_box_vertex(point_id, geo_scale_a)
p_a_world = wp.transform_point(X_ws_a, query_a)
query_b_local = wp.transform_point(X_sw_b, p_a_world)
mesh_b = geo.source[shape_b]
max_dist = (rigid_contact_margin + thickness) / min_scale_b
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
res = wp.mesh_query_point_sign_normal(
mesh_b, wp.cw_div(query_b_local, geo_scale_b), max_dist, sign, face_index, face_u, face_v
)
if res:
shape_p = wp.mesh_eval_position(mesh_b, face_index, face_u, face_v)
shape_p = wp.cw_mul(shape_p, geo_scale_b)
p_b_world = wp.transform_point(X_ws_b, shape_p)
# contact direction vector in world frame
diff_b = p_a_world - p_b_world
normal = wp.normalize(diff_b) * sign
distance = wp.dot(diff_b, normal)
else:
return
elif geo_type_a == wp.sim.GEO_MESH and geo_type_b == wp.sim.GEO_MESH:
# vertex-based contact
mesh = wp.mesh_get(geo.source[shape_a])
mesh_b = geo.source[shape_b]
body_a_pos = wp.cw_mul(mesh.points[point_id], geo_scale_a)
p_a_world = wp.transform_point(X_ws_a, body_a_pos)
query_b_local = wp.transform_point(X_sw_b, p_a_world)
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
min_scale = min(min_scale_a, min_scale_b)
max_dist = (rigid_contact_margin + thickness) / min_scale
res = wp.mesh_query_point_sign_normal(
mesh_b, wp.cw_div(query_b_local, geo_scale_b), max_dist, sign, face_index, face_u, face_v
)
if res:
shape_p = wp.mesh_eval_position(mesh_b, face_index, face_u, face_v)
shape_p = wp.cw_mul(shape_p, geo_scale_b)
p_b_world = wp.transform_point(X_ws_b, shape_p)
# contact direction vector in world frame
diff_b = p_a_world - p_b_world
normal = wp.normalize(diff_b) * sign
distance = wp.dot(diff_b, normal)
else:
return
elif geo_type_a == wp.sim.GEO_MESH and geo_type_b == wp.sim.GEO_PLANE:
# vertex-based contact
mesh = wp.mesh_get(geo.source[shape_a])
body_a_pos = wp.cw_mul(mesh.points[point_id], geo_scale_a)
p_a_world = wp.transform_point(X_ws_a, body_a_pos)
query_b = wp.transform_point(X_sw_b, p_a_world)
p_b_body = closest_point_plane(geo_scale_b[0], geo_scale_b[1], query_b)
p_b_world = wp.transform_point(X_ws_b, p_b_body)
diff = p_a_world - p_b_world
        # if the plane is infinite, or the closest point lies within the plane's extents, use the plane's up axis as the contact normal to prevent intersections
if (
geo_scale_b[0] == 0.0
and geo_scale_b[1] == 0.0
or wp.abs(query_b[0]) < geo_scale_b[0]
and wp.abs(query_b[2]) < geo_scale_b[1]
):
normal = wp.transform_vector(X_ws_b, wp.vec3(0.0, 1.0, 0.0))
distance = wp.dot(diff, normal)
else:
normal = wp.normalize(diff)
distance = wp.dot(diff, normal)
# ignore extreme penetrations (e.g. when mesh is below the plane)
if distance < -rigid_contact_margin:
return
else:
print("Unsupported geometry pair in collision handling")
return
d = distance - thickness
if d < rigid_contact_margin:
pair_contact_id = limited_counter_increment(
contact_pairwise_counter, pair_index, contact_tids, tid, contact_limit
)
if pair_contact_id == -1:
# wp.printf("Reached contact point limit %d >= %d for shape pair %d and %d (pair_index: %d)\n",
# contact_pairwise_counter[pair_index], contact_limit, shape_a, shape_b, pair_index)
# reached contact point limit
return
index = limited_counter_increment(contact_count, 0, contact_tids, tid, -1)
contact_shape0[index] = shape_a
contact_shape1[index] = shape_b
# transform from world into body frame (so the contact point includes the shape transform)
contact_point0[index] = wp.transform_point(X_bw_a, p_a_world)
contact_point1[index] = wp.transform_point(X_bw_b, p_b_world)
contact_offset0[index] = wp.transform_vector(X_bw_a, -thickness_a * normal)
contact_offset1[index] = wp.transform_vector(X_bw_b, thickness_b * normal)
contact_normal[index] = normal
contact_thickness[index] = thickness
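# handle_contact_pairs stores contact_point0/contact_point1 in the frames of the bodies that own
# shape 0 and shape 1 respectively, so consumers have to transform them back with the current body
# transforms. A hedged kernel sketch of that transformation for the first contact point (the kernel
# is illustrative and mirrors what the renderer does; it is not part of the collision pipeline):
@wp.kernel
def _example_contact_point0_to_world(
    body_q: wp.array(dtype=wp.transform),
    shape_body: wp.array(dtype=int),
    contact_shape0: wp.array(dtype=int),
    contact_point0: wp.array(dtype=wp.vec3),
    # outputs
    world_point0: wp.array(dtype=wp.vec3),
):
    tid = wp.tid()
    shape = contact_shape0[tid]
    if shape < 0:
        return
    X_wb = wp.transform_identity()
    body = shape_body[shape]
    if body >= 0:
        X_wb = body_q[body]
    world_point0[tid] = wp.transform_point(X_wb, contact_point0[tid])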
def collide(model, state, edge_sdf_iter: int = 10, iterate_mesh_vertices: bool = True, requires_grad: bool = None):
"""
Generates contact points for the particles and rigid bodies in the model,
to be used in the contact dynamics kernel of the integrator.
Args:
model: the model to be simulated
state: the state of the model
edge_sdf_iter: number of search iterations for finding closest contact points between edges and SDF
iterate_mesh_vertices: whether to iterate over all vertices of a mesh for contact generation (used for capsule/box <> mesh collision)
requires_grad: whether to duplicate contact arrays for gradient computation (if None uses model.requires_grad)
"""
if requires_grad is None:
requires_grad = model.requires_grad
with wp.ScopedTimer("collide", False):
# generate soft contacts for particles and shapes except ground plane (last shape)
if model.particle_count and model.shape_count > 1:
if requires_grad:
model.soft_contact_body_pos = wp.clone(model.soft_contact_body_pos)
model.soft_contact_body_vel = wp.clone(model.soft_contact_body_vel)
model.soft_contact_normal = wp.clone(model.soft_contact_normal)
# clear old count
model.soft_contact_count.zero_()
wp.launch(
kernel=create_soft_contacts,
dim=model.particle_count * (model.shape_count - 1),
inputs=[
state.particle_q,
model.particle_radius,
model.particle_flags,
state.body_q,
model.shape_transform,
model.shape_body,
model.shape_geo,
model.soft_contact_margin,
model.soft_contact_max,
model.shape_count - 1,
],
outputs=[
model.soft_contact_count,
model.soft_contact_particle,
model.soft_contact_shape,
model.soft_contact_body_pos,
model.soft_contact_body_vel,
model.soft_contact_normal,
model.soft_contact_tids,
],
device=model.device,
)
        if model.shape_contact_pair_count or (model.ground and model.shape_ground_contact_pair_count):
# clear old count
model.rigid_contact_count.zero_()
model.rigid_contact_broad_shape0.fill_(-1)
model.rigid_contact_broad_shape1.fill_(-1)
if model.shape_contact_pair_count:
wp.launch(
kernel=broadphase_collision_pairs,
dim=model.shape_contact_pair_count,
inputs=[
model.shape_contact_pairs,
state.body_q,
model.shape_transform,
model.shape_body,
model.body_mass,
model.shape_count,
model.shape_geo,
model.shape_collision_radius,
model.rigid_contact_max,
model.rigid_contact_margin,
model.rigid_mesh_contact_max,
iterate_mesh_vertices,
],
outputs=[
model.rigid_contact_count,
model.rigid_contact_broad_shape0,
model.rigid_contact_broad_shape1,
model.rigid_contact_point_id,
model.rigid_contact_point_limit,
],
device=model.device,
record_tape=False,
)
if model.ground and model.shape_ground_contact_pair_count:
wp.launch(
kernel=broadphase_collision_pairs,
dim=model.shape_ground_contact_pair_count,
inputs=[
model.shape_ground_contact_pairs,
state.body_q,
model.shape_transform,
model.shape_body,
model.body_mass,
model.shape_count,
model.shape_geo,
model.shape_collision_radius,
model.rigid_contact_max,
model.rigid_contact_margin,
model.rigid_mesh_contact_max,
iterate_mesh_vertices,
],
outputs=[
model.rigid_contact_count,
model.rigid_contact_broad_shape0,
model.rigid_contact_broad_shape1,
model.rigid_contact_point_id,
model.rigid_contact_point_limit,
],
device=model.device,
record_tape=False,
)
        if model.shape_contact_pair_count or (model.ground and model.shape_ground_contact_pair_count):
if requires_grad:
model.rigid_contact_point0 = wp.clone(model.rigid_contact_point0)
model.rigid_contact_point1 = wp.clone(model.rigid_contact_point1)
model.rigid_contact_offset0 = wp.clone(model.rigid_contact_offset0)
model.rigid_contact_offset1 = wp.clone(model.rigid_contact_offset1)
model.rigid_contact_normal = wp.clone(model.rigid_contact_normal)
model.rigid_contact_thickness = wp.clone(model.rigid_contact_thickness)
model.rigid_contact_count = wp.zeros_like(model.rigid_contact_count)
model.rigid_contact_pairwise_counter = wp.zeros_like(model.rigid_contact_pairwise_counter)
model.rigid_contact_tids = wp.zeros_like(model.rigid_contact_tids)
model.rigid_contact_shape0 = wp.empty_like(model.rigid_contact_shape0)
model.rigid_contact_shape1 = wp.empty_like(model.rigid_contact_shape1)
else:
model.rigid_contact_count.zero_()
model.rigid_contact_pairwise_counter.zero_()
model.rigid_contact_tids.zero_()
model.rigid_contact_shape0.fill_(-1)
model.rigid_contact_shape1.fill_(-1)
wp.launch(
kernel=handle_contact_pairs,
dim=model.rigid_contact_max,
inputs=[
state.body_q,
model.shape_transform,
model.shape_body,
model.shape_geo,
model.rigid_contact_margin,
model.rigid_contact_broad_shape0,
model.rigid_contact_broad_shape1,
model.shape_count,
model.rigid_contact_point_id,
model.rigid_contact_point_limit,
edge_sdf_iter,
],
outputs=[
model.rigid_contact_count,
model.rigid_contact_shape0,
model.rigid_contact_shape1,
model.rigid_contact_point0,
model.rigid_contact_point1,
model.rigid_contact_offset0,
model.rigid_contact_offset1,
model.rigid_contact_normal,
model.rigid_contact_thickness,
model.rigid_contact_pairwise_counter,
model.rigid_contact_tids,
],
device=model.device,
)
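# A hedged usage sketch of the collision entry point above: collide() is called once per step on
# the current state before the integrator consumes the generated contacts. `model`, `state_in` and
# `integrator` are assumed to come from an already-built scene (e.g. ModelBuilder.finalize());
# only the call pattern is the point of the example.
def _example_collision_step(model, state_in, integrator, dt: float):
    collide(model, state_in)
    state_out = model.state()
    integrator.simulate(model, state_in, state_out, dt)
    return state_out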
| 63,358 | Python | 38.723511 | 141 | 0.559929 |
NVIDIA/warp/warp/sim/__init__.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from .articulation import eval_fk, eval_ik
from .collide import collide
from .import_mjcf import parse_mjcf
from .import_snu import parse_snu
from .import_urdf import parse_urdf
from .import_usd import parse_usd, resolve_usd_from_url
from .inertia import transform_inertia
from .integrator import Integrator, integrate_bodies, integrate_particles
from .integrator_euler import SemiImplicitIntegrator
from .integrator_featherstone import FeatherstoneIntegrator
from .integrator_xpbd import XPBDIntegrator
from .model import (
GEO_BOX,
GEO_CAPSULE,
GEO_CONE,
GEO_CYLINDER,
GEO_MESH,
GEO_NONE,
GEO_PLANE,
GEO_SDF,
GEO_SPHERE,
JOINT_BALL,
JOINT_COMPOUND,
JOINT_D6,
JOINT_DISTANCE,
JOINT_FIXED,
JOINT_FREE,
JOINT_MODE_FORCE,
JOINT_MODE_TARGET_POSITION,
JOINT_MODE_TARGET_VELOCITY,
JOINT_PRISMATIC,
JOINT_REVOLUTE,
JOINT_UNIVERSAL,
SDF,
Control,
JointAxis,
Mesh,
Model,
ModelBuilder,
ModelShapeGeometry,
ModelShapeMaterials,
State,
)
from .utils import load_mesh, quat_from_euler, quat_to_euler, velocity_at_point
| 1,553 | Python | 28.320754 | 79 | 0.746941 |
NVIDIA/warp/warp/sim/articulation.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
from .utils import quat_decompose, quat_twist
@wp.func
def compute_2d_rotational_dofs(
axis_0: wp.vec3,
axis_1: wp.vec3,
q0: float,
q1: float,
qd0: float,
qd1: float,
):
"""
Computes the rotation quaternion and 3D angular velocity given the joint axes, coordinates and velocities.
"""
q_off = wp.quat_from_matrix(wp.mat33(axis_0, axis_1, wp.cross(axis_0, axis_1)))
# body local axes
local_0 = wp.quat_rotate(q_off, wp.vec3(1.0, 0.0, 0.0))
local_1 = wp.quat_rotate(q_off, wp.vec3(0.0, 1.0, 0.0))
axis_0 = local_0
q_0 = wp.quat_from_axis_angle(axis_0, q0)
axis_1 = wp.quat_rotate(q_0, local_1)
q_1 = wp.quat_from_axis_angle(axis_1, q1)
rot = q_1 * q_0
vel = axis_0 * qd0 + axis_1 * qd1
return rot, vel
@wp.func
def invert_2d_rotational_dofs(
axis_0: wp.vec3,
axis_1: wp.vec3,
q_p: wp.quat,
q_c: wp.quat,
w_err: wp.vec3,
):
"""
    Computes the generalized joint position and velocity coordinates for a 2D rotational joint given the joint axes, the orientations of the parent and child anchor frames, and the angular velocity difference between the two bodies the joint connects.
"""
q_off = wp.quat_from_matrix(wp.mat33(axis_0, axis_1, wp.cross(axis_0, axis_1)))
q_pc = wp.quat_inverse(q_off) * wp.quat_inverse(q_p) * q_c * q_off
    # decompose into a compound rotation about each axis
angles = quat_decompose(q_pc)
# find rotation axes
local_0 = wp.quat_rotate(q_off, wp.vec3(1.0, 0.0, 0.0))
local_1 = wp.quat_rotate(q_off, wp.vec3(0.0, 1.0, 0.0))
local_2 = wp.quat_rotate(q_off, wp.vec3(0.0, 0.0, 1.0))
axis_0 = local_0
q_0 = wp.quat_from_axis_angle(axis_0, angles[0])
axis_1 = wp.quat_rotate(q_0, local_1)
q_1 = wp.quat_from_axis_angle(axis_1, angles[1])
axis_2 = wp.quat_rotate(q_1 * q_0, local_2)
# convert angular velocity to local space
w_err_p = wp.quat_rotate_inv(q_p, w_err)
# given joint axes and angular velocity error, solve for joint velocities
c12 = wp.cross(axis_1, axis_2)
c02 = wp.cross(axis_0, axis_2)
vel = wp.vec2(wp.dot(w_err_p, c12) / wp.dot(axis_0, c12), wp.dot(w_err_p, c02) / wp.dot(axis_1, c02))
return wp.vec2(angles[0], angles[1]), vel
@wp.func
def compute_3d_rotational_dofs(
axis_0: wp.vec3,
axis_1: wp.vec3,
axis_2: wp.vec3,
q0: float,
q1: float,
q2: float,
qd0: float,
qd1: float,
qd2: float,
):
"""
Computes the rotation quaternion and 3D angular velocity given the joint axes, coordinates and velocities.
"""
q_off = wp.quat_from_matrix(wp.mat33(axis_0, axis_1, axis_2))
# body local axes
local_0 = wp.quat_rotate(q_off, wp.vec3(1.0, 0.0, 0.0))
local_1 = wp.quat_rotate(q_off, wp.vec3(0.0, 1.0, 0.0))
local_2 = wp.quat_rotate(q_off, wp.vec3(0.0, 0.0, 1.0))
# reconstruct rotation axes
axis_0 = local_0
q_0 = wp.quat_from_axis_angle(axis_0, q0)
axis_1 = wp.quat_rotate(q_0, local_1)
q_1 = wp.quat_from_axis_angle(axis_1, q1)
axis_2 = wp.quat_rotate(q_1 * q_0, local_2)
q_2 = wp.quat_from_axis_angle(axis_2, q2)
rot = q_2 * q_1 * q_0
vel = axis_0 * qd0 + axis_1 * qd1 + axis_2 * qd2
return rot, vel
@wp.func
def invert_3d_rotational_dofs(
axis_0: wp.vec3, axis_1: wp.vec3, axis_2: wp.vec3, q_p: wp.quat, q_c: wp.quat, w_err: wp.vec3
):
"""
    Computes the generalized joint position and velocity coordinates for a 3D rotational joint given the joint axes, the orientations of the parent and child anchor frames, and the angular velocity difference between the two bodies the joint connects.
"""
q_off = wp.quat_from_matrix(wp.mat33(axis_0, axis_1, axis_2))
q_pc = wp.quat_inverse(q_off) * wp.quat_inverse(q_p) * q_c * q_off
    # decompose into a compound rotation about each axis
angles = quat_decompose(q_pc)
# find rotation axes
local_0 = wp.quat_rotate(q_off, wp.vec3(1.0, 0.0, 0.0))
local_1 = wp.quat_rotate(q_off, wp.vec3(0.0, 1.0, 0.0))
local_2 = wp.quat_rotate(q_off, wp.vec3(0.0, 0.0, 1.0))
axis_0 = local_0
q_0 = wp.quat_from_axis_angle(axis_0, angles[0])
axis_1 = wp.quat_rotate(q_0, local_1)
q_1 = wp.quat_from_axis_angle(axis_1, angles[1])
axis_2 = wp.quat_rotate(q_1 * q_0, local_2)
# convert angular velocity to local space
w_err_p = wp.quat_rotate_inv(q_p, w_err)
# given joint axes and angular velocity error, solve for joint velocities
c12 = wp.cross(axis_1, axis_2)
c02 = wp.cross(axis_0, axis_2)
c01 = wp.cross(axis_0, axis_1)
velocities = wp.vec3(
wp.dot(w_err_p, c12) / wp.dot(axis_0, c12),
wp.dot(w_err_p, c02) / wp.dot(axis_1, c02),
wp.dot(w_err_p, c01) / wp.dot(axis_2, c01),
)
return angles, velocities
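# The compute_*/invert_* helpers above are inverse operations: composing a rotation and angular
# velocity from joint coordinates and then decomposing it again recovers those coordinates. A
# hedged round-trip sketch as a kernel, using the standard basis as joint axes and treating the
# parent anchor frame as the identity (the values and the kernel itself are purely illustrative):
@wp.kernel
def _example_3d_dof_roundtrip(out_q: wp.array(dtype=wp.vec3), out_qd: wp.array(dtype=wp.vec3)):
    axis_0 = wp.vec3(1.0, 0.0, 0.0)
    axis_1 = wp.vec3(0.0, 1.0, 0.0)
    axis_2 = wp.vec3(0.0, 0.0, 1.0)
    rot, vel = compute_3d_rotational_dofs(axis_0, axis_1, axis_2, 0.3, -0.2, 0.1, 1.0, 0.0, 0.0)
    angles, rates = invert_3d_rotational_dofs(axis_0, axis_1, axis_2, wp.quat_identity(), rot, vel)
    out_q[0] = angles  # expected to be close to (0.3, -0.2, 0.1)
    out_qd[0] = rates  # expected to be close to (1.0, 0.0, 0.0)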
@wp.kernel
def eval_articulation_fk(
articulation_start: wp.array(dtype=int),
articulation_mask: wp.array(
dtype=int
), # used to enable / disable FK for an articulation, if None then treat all as enabled
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
joint_q_start: wp.array(dtype=int),
joint_qd_start: wp.array(dtype=int),
joint_type: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
joint_axis: wp.array(dtype=wp.vec3),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
body_com: wp.array(dtype=wp.vec3),
# outputs
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
# early out if disabling FK for this articulation
if articulation_mask:
if articulation_mask[tid] == 0:
return
joint_start = articulation_start[tid]
joint_end = articulation_start[tid + 1]
for i in range(joint_start, joint_end):
parent = joint_parent[i]
child = joint_child[i]
# compute transform across the joint
type = joint_type[i]
X_pj = joint_X_p[i]
X_cj = joint_X_c[i]
# parent anchor frame in world space
X_wpj = X_pj
# velocity of parent anchor point in world space
v_wpj = wp.spatial_vector()
if parent >= 0:
X_wp = body_q[parent]
X_wpj = X_wp * X_wpj
r_p = wp.transform_get_translation(X_wpj) - wp.transform_point(X_wp, body_com[parent])
v_wp = body_qd[parent]
w_p = wp.spatial_top(v_wp)
v_p = wp.spatial_bottom(v_wp) + wp.cross(w_p, r_p)
v_wpj = wp.spatial_vector(w_p, v_p)
q_start = joint_q_start[i]
qd_start = joint_qd_start[i]
axis_start = joint_axis_start[i]
lin_axis_count = joint_axis_dim[i, 0]
ang_axis_count = joint_axis_dim[i, 1]
X_j = wp.transform_identity()
v_j = wp.spatial_vector(wp.vec3(), wp.vec3())
if type == wp.sim.JOINT_PRISMATIC:
axis = joint_axis[axis_start]
q = joint_q[q_start]
qd = joint_qd[qd_start]
X_j = wp.transform(axis * q, wp.quat_identity())
v_j = wp.spatial_vector(wp.vec3(), axis * qd)
if type == wp.sim.JOINT_REVOLUTE:
axis = joint_axis[axis_start]
q = joint_q[q_start]
qd = joint_qd[qd_start]
X_j = wp.transform(wp.vec3(), wp.quat_from_axis_angle(axis, q))
v_j = wp.spatial_vector(axis * qd, wp.vec3())
if type == wp.sim.JOINT_BALL:
r = wp.quat(joint_q[q_start + 0], joint_q[q_start + 1], joint_q[q_start + 2], joint_q[q_start + 3])
w = wp.vec3(joint_qd[qd_start + 0], joint_qd[qd_start + 1], joint_qd[qd_start + 2])
X_j = wp.transform(wp.vec3(), r)
v_j = wp.spatial_vector(w, wp.vec3())
if type == wp.sim.JOINT_FREE or type == wp.sim.JOINT_DISTANCE:
t = wp.transform(
wp.vec3(joint_q[q_start + 0], joint_q[q_start + 1], joint_q[q_start + 2]),
wp.quat(joint_q[q_start + 3], joint_q[q_start + 4], joint_q[q_start + 5], joint_q[q_start + 6]),
)
v = wp.spatial_vector(
wp.vec3(joint_qd[qd_start + 0], joint_qd[qd_start + 1], joint_qd[qd_start + 2]),
wp.vec3(joint_qd[qd_start + 3], joint_qd[qd_start + 4], joint_qd[qd_start + 5]),
)
X_j = t
v_j = v
if type == wp.sim.JOINT_COMPOUND:
rot, vel_w = compute_3d_rotational_dofs(
joint_axis[axis_start],
joint_axis[axis_start + 1],
joint_axis[axis_start + 2],
joint_q[q_start + 0],
joint_q[q_start + 1],
joint_q[q_start + 2],
joint_qd[qd_start + 0],
joint_qd[qd_start + 1],
joint_qd[qd_start + 2],
)
t = wp.transform(wp.vec3(0.0, 0.0, 0.0), rot)
v = wp.spatial_vector(vel_w, wp.vec3(0.0, 0.0, 0.0))
X_j = t
v_j = v
if type == wp.sim.JOINT_UNIVERSAL:
rot, vel_w = compute_2d_rotational_dofs(
joint_axis[axis_start],
joint_axis[axis_start + 1],
joint_q[q_start + 0],
joint_q[q_start + 1],
joint_qd[qd_start + 0],
joint_qd[qd_start + 1],
)
t = wp.transform(wp.vec3(0.0, 0.0, 0.0), rot)
v = wp.spatial_vector(vel_w, wp.vec3(0.0, 0.0, 0.0))
X_j = t
v_j = v
if type == wp.sim.JOINT_D6:
pos = wp.vec3(0.0)
rot = wp.quat_identity()
vel_v = wp.vec3(0.0)
vel_w = wp.vec3(0.0)
# unroll for loop to ensure joint actions remain differentiable
# (since differentiating through a for loop that updates a local variable is not supported)
if lin_axis_count > 0:
axis = joint_axis[axis_start + 0]
pos += axis * joint_q[q_start + 0]
vel_v += axis * joint_qd[qd_start + 0]
if lin_axis_count > 1:
axis = joint_axis[axis_start + 1]
pos += axis * joint_q[q_start + 1]
vel_v += axis * joint_qd[qd_start + 1]
if lin_axis_count > 2:
axis = joint_axis[axis_start + 2]
pos += axis * joint_q[q_start + 2]
vel_v += axis * joint_qd[qd_start + 2]
ia = axis_start + lin_axis_count
iq = q_start + lin_axis_count
iqd = qd_start + lin_axis_count
if ang_axis_count == 1:
axis = joint_axis[ia]
rot = wp.quat_from_axis_angle(axis, joint_q[iq])
vel_w = joint_qd[iqd] * axis
if ang_axis_count == 2:
rot, vel_w = compute_2d_rotational_dofs(
joint_axis[ia + 0],
joint_axis[ia + 1],
joint_q[iq + 0],
joint_q[iq + 1],
joint_qd[iqd + 0],
joint_qd[iqd + 1],
)
if ang_axis_count == 3:
rot, vel_w = compute_3d_rotational_dofs(
joint_axis[ia + 0],
joint_axis[ia + 1],
joint_axis[ia + 2],
joint_q[iq + 0],
joint_q[iq + 1],
joint_q[iq + 2],
joint_qd[iqd + 0],
joint_qd[iqd + 1],
joint_qd[iqd + 2],
)
X_j = wp.transform(pos, rot)
v_j = wp.spatial_vector(vel_w, vel_v)
# transform from world to joint anchor frame at child body
X_wcj = X_wpj * X_j
# transform from world to child body frame
X_wc = X_wcj * wp.transform_inverse(X_cj)
# transform velocity across the joint to world space
angular_vel = wp.transform_vector(X_wpj, wp.spatial_top(v_j))
linear_vel = wp.transform_vector(X_wpj, wp.spatial_bottom(v_j))
v_wc = v_wpj + wp.spatial_vector(angular_vel, linear_vel)
body_q[child] = X_wc
body_qd[child] = v_wc
# updates state body information based on joint coordinates
def eval_fk(model, joint_q, joint_qd, mask, state):
"""
Evaluates the model's forward kinematics given the joint coordinates and updates the state's body information (:attr:`State.body_q` and :attr:`State.body_qd`).
Args:
model (Model): The model to evaluate.
joint_q (array): Generalized joint position coordinates, shape [joint_coord_count], float
joint_qd (array): Generalized joint velocity coordinates, shape [joint_dof_count], float
        mask (array): Mask to enable/disable FK per articulation; if None, all articulations are treated as enabled, shape [articulation_count], int
state (State): The state to update.
"""
wp.launch(
kernel=eval_articulation_fk,
dim=model.articulation_count,
inputs=[
model.articulation_start,
mask,
joint_q,
joint_qd,
model.joint_q_start,
model.joint_qd_start,
model.joint_type,
model.joint_parent,
model.joint_child,
model.joint_X_p,
model.joint_X_c,
model.joint_axis,
model.joint_axis_start,
model.joint_axis_dim,
model.body_com,
],
outputs=[
state.body_q,
state.body_qd,
],
device=model.device,
)
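# A hedged usage sketch of eval_fk: populate a freshly created state from the model's current
# generalized coordinates (model.joint_q / model.joint_qd); passing None as the mask runs FK for
# every articulation. The helper itself is illustrative, not part of the library API.
def _example_run_fk(model):
    state = model.state()
    eval_fk(model, model.joint_q, model.joint_qd, None, state)
    return state.body_q.numpy()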
@wp.func
def reconstruct_angular_q_qd(q_pc: wp.quat, w_err: wp.vec3, X_wp: wp.transform, axis: wp.vec3):
"""
Reconstructs the angular joint coordinates and velocities given the relative rotation and angular velocity
between a parent and child body.
Args:
q_pc (quat): The relative rotation between the parent and child body.
w_err (vec3): The angular velocity between the parent and child body.
X_wp (transform): The parent body's transform in world space.
axis (vec3): The joint axis in the frame of the parent body.
Returns:
q (float): The joint position coordinate.
qd (float): The joint velocity coordinate.
"""
axis_p = wp.transform_vector(X_wp, axis)
twist = quat_twist(axis, q_pc)
q = wp.acos(twist[3]) * 2.0 * wp.sign(wp.dot(axis, wp.vec3(twist[0], twist[1], twist[2])))
qd = wp.dot(w_err, axis_p)
return q, qd
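# A hedged sketch of the reconstruction above for a pure twist about a known axis: build the
# relative rotation from a known angle and check that the same angle (and axial velocity) comes
# back out. The kernel and its constants are illustrative only.
@wp.kernel
def _example_reconstruct_revolute(out: wp.array(dtype=float)):
    axis = wp.vec3(0.0, 0.0, 1.0)
    q_pc = wp.quat_from_axis_angle(axis, 0.25)
    q, qd = reconstruct_angular_q_qd(q_pc, axis * 0.5, wp.transform_identity(), axis)
    out[0] = q  # expected to be close to 0.25
    out[1] = qd  # expected to be close to 0.5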
@wp.kernel
def eval_articulation_ik(
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
joint_type: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
joint_axis: wp.array(dtype=wp.vec3),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
joint_q_start: wp.array(dtype=int),
joint_qd_start: wp.array(dtype=int),
joint_q: wp.array(dtype=float),
joint_qd: wp.array(dtype=float),
):
tid = wp.tid()
parent = joint_parent[tid]
child = joint_child[tid]
X_pj = joint_X_p[tid]
X_cj = joint_X_c[tid]
w_p = wp.vec3()
v_p = wp.vec3()
v_wp = wp.spatial_vector()
# parent anchor frame in world space
X_wpj = X_pj
if parent >= 0:
X_wp = body_q[parent]
X_wpj = X_wp * X_pj
r_p = wp.transform_get_translation(X_wpj) - wp.transform_point(X_wp, body_com[parent])
v_wp = body_qd[parent]
w_p = wp.spatial_top(v_wp)
v_p = wp.spatial_bottom(v_wp) + wp.cross(w_p, r_p)
# child transform and moment arm
X_wc = body_q[child]
X_wcj = X_wc * X_cj
v_wc = body_qd[child]
w_c = wp.spatial_top(v_wc)
v_c = wp.spatial_bottom(v_wc)
# joint properties
type = joint_type[tid]
# compute position and orientation differences between anchor frames
x_p = wp.transform_get_translation(X_wpj)
x_c = wp.transform_get_translation(X_wcj)
q_p = wp.transform_get_rotation(X_wpj)
q_c = wp.transform_get_rotation(X_wcj)
x_err = x_c - x_p
v_err = v_c - v_p
w_err = w_c - w_p
q_start = joint_q_start[tid]
qd_start = joint_qd_start[tid]
axis_start = joint_axis_start[tid]
lin_axis_count = joint_axis_dim[tid, 0]
ang_axis_count = joint_axis_dim[tid, 1]
if type == wp.sim.JOINT_PRISMATIC:
axis = joint_axis[axis_start]
# world space joint axis
axis_p = wp.quat_rotate(q_p, axis)
# evaluate joint coordinates
q = wp.dot(x_err, axis_p)
qd = wp.dot(v_err, axis_p)
joint_q[q_start] = q
joint_qd[qd_start] = qd
return
if type == wp.sim.JOINT_REVOLUTE:
axis = joint_axis[axis_start]
q_pc = wp.quat_inverse(q_p) * q_c
q, qd = reconstruct_angular_q_qd(q_pc, w_err, X_wpj, axis)
joint_q[q_start] = q
joint_qd[qd_start] = qd
return
if type == wp.sim.JOINT_BALL:
q_pc = wp.quat_inverse(q_p) * q_c
joint_q[q_start + 0] = q_pc[0]
joint_q[q_start + 1] = q_pc[1]
joint_q[q_start + 2] = q_pc[2]
joint_q[q_start + 3] = q_pc[3]
ang_vel = wp.transform_vector(wp.transform_inverse(X_wpj), w_err)
joint_qd[qd_start + 0] = ang_vel[0]
joint_qd[qd_start + 1] = ang_vel[1]
joint_qd[qd_start + 2] = ang_vel[2]
return
if type == wp.sim.JOINT_FIXED:
return
if type == wp.sim.JOINT_FREE or type == wp.sim.JOINT_DISTANCE:
q_pc = wp.quat_inverse(q_p) * q_c
x_err_c = wp.quat_rotate_inv(q_p, x_err)
v_err_c = wp.quat_rotate_inv(q_p, v_err)
w_err_c = wp.quat_rotate_inv(q_p, w_err)
joint_q[q_start + 0] = x_err_c[0]
joint_q[q_start + 1] = x_err_c[1]
joint_q[q_start + 2] = x_err_c[2]
joint_q[q_start + 3] = q_pc[0]
joint_q[q_start + 4] = q_pc[1]
joint_q[q_start + 5] = q_pc[2]
joint_q[q_start + 6] = q_pc[3]
joint_qd[qd_start + 0] = w_err_c[0]
joint_qd[qd_start + 1] = w_err_c[1]
joint_qd[qd_start + 2] = w_err_c[2]
joint_qd[qd_start + 3] = v_err_c[0]
joint_qd[qd_start + 4] = v_err_c[1]
joint_qd[qd_start + 5] = v_err_c[2]
return
if type == wp.sim.JOINT_COMPOUND:
axis_0 = joint_axis[axis_start + 0]
axis_1 = joint_axis[axis_start + 1]
axis_2 = joint_axis[axis_start + 2]
qs, qds = invert_3d_rotational_dofs(axis_0, axis_1, axis_2, q_p, q_c, w_err)
joint_q[q_start + 0] = qs[0]
joint_q[q_start + 1] = qs[1]
joint_q[q_start + 2] = qs[2]
joint_qd[qd_start + 0] = qds[0]
joint_qd[qd_start + 1] = qds[1]
joint_qd[qd_start + 2] = qds[2]
return
if type == wp.sim.JOINT_UNIVERSAL:
axis_0 = joint_axis[axis_start + 0]
axis_1 = joint_axis[axis_start + 1]
qs2, qds2 = invert_2d_rotational_dofs(axis_0, axis_1, q_p, q_c, w_err)
joint_q[q_start + 0] = qs2[0]
joint_q[q_start + 1] = qs2[1]
joint_qd[qd_start + 0] = qds2[0]
joint_qd[qd_start + 1] = qds2[1]
return
if type == wp.sim.JOINT_D6:
x_err_c = wp.quat_rotate_inv(q_p, x_err)
v_err_c = wp.quat_rotate_inv(q_p, v_err)
if lin_axis_count > 0:
axis = joint_axis[axis_start + 0]
joint_q[q_start + 0] = wp.dot(x_err_c, axis)
joint_qd[qd_start + 0] = wp.dot(v_err_c, axis)
if lin_axis_count > 1:
axis = joint_axis[axis_start + 1]
joint_q[q_start + 1] = wp.dot(x_err_c, axis)
joint_qd[qd_start + 1] = wp.dot(v_err_c, axis)
if lin_axis_count > 2:
axis = joint_axis[axis_start + 2]
joint_q[q_start + 2] = wp.dot(x_err_c, axis)
joint_qd[qd_start + 2] = wp.dot(v_err_c, axis)
if ang_axis_count == 1:
            axis = joint_axis[axis_start + lin_axis_count]
            q_pc = wp.quat_inverse(q_p) * q_c
            q, qd = reconstruct_angular_q_qd(q_pc, w_err, X_wpj, axis)
joint_q[q_start + lin_axis_count] = q
joint_qd[qd_start + lin_axis_count] = qd
if ang_axis_count == 2:
axis_0 = joint_axis[axis_start + lin_axis_count + 0]
axis_1 = joint_axis[axis_start + lin_axis_count + 1]
qs2, qds2 = invert_2d_rotational_dofs(axis_0, axis_1, q_p, q_c, w_err)
joint_q[q_start + lin_axis_count + 0] = qs2[0]
joint_q[q_start + lin_axis_count + 1] = qs2[1]
joint_qd[qd_start + lin_axis_count + 0] = qds2[0]
joint_qd[qd_start + lin_axis_count + 1] = qds2[1]
if ang_axis_count == 3:
axis_0 = joint_axis[axis_start + lin_axis_count + 0]
axis_1 = joint_axis[axis_start + lin_axis_count + 1]
axis_2 = joint_axis[axis_start + lin_axis_count + 2]
qs3, qds3 = invert_3d_rotational_dofs(axis_0, axis_1, axis_2, q_p, q_c, w_err)
joint_q[q_start + lin_axis_count + 0] = qs3[0]
joint_q[q_start + lin_axis_count + 1] = qs3[1]
joint_q[q_start + lin_axis_count + 2] = qs3[2]
joint_qd[qd_start + lin_axis_count + 0] = qds3[0]
joint_qd[qd_start + lin_axis_count + 1] = qds3[1]
joint_qd[qd_start + lin_axis_count + 2] = qds3[2]
return
# given a model in maximal coordinates, computes the generalized joint coordinates (closest-point projection)
def eval_ik(model, state, joint_q, joint_qd):
"""
Evaluates the model's inverse kinematics given the state's body information (:attr:`State.body_q` and :attr:`State.body_qd`) and updates the generalized joint coordinates `joint_q` and `joint_qd`.
Args:
model (Model): The model to evaluate.
state (State): The state with the body's maximal coordinates (positions :attr:`State.body_q` and velocities :attr:`State.body_qd`) to use.
joint_q (array): Generalized joint position coordinates, shape [joint_coord_count], float
joint_qd (array): Generalized joint velocity coordinates, shape [joint_dof_count], float
"""
wp.launch(
kernel=eval_articulation_ik,
dim=model.joint_count,
inputs=[
state.body_q,
state.body_qd,
model.body_com,
model.joint_type,
model.joint_parent,
model.joint_child,
model.joint_X_p,
model.joint_X_c,
model.joint_axis,
model.joint_axis_start,
model.joint_axis_dim,
model.joint_q_start,
model.joint_qd_start,
],
outputs=[joint_q, joint_qd],
device=model.device,
)
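# A hedged usage sketch of eval_ik: project the maximal-coordinate body states in `state` back to
# generalized joint coordinates, writing into freshly allocated arrays (the array allocation and
# the helper name are illustrative only).
def _example_run_ik(model, state):
    joint_q = wp.zeros_like(model.joint_q)
    joint_qd = wp.zeros_like(model.joint_qd)
    eval_ik(model, state, joint_q, joint_qd)
    return joint_q.numpy(), joint_qd.numpy()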
| 23,349 | Python | 33.037901 | 210 | 0.555698 |
NVIDIA/warp/warp/sim/render.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from collections import defaultdict
import numpy as np
import warp as wp
import warp.render
import warp.sim
from warp.render.utils import solidify_mesh, tab10_color_map
# TODO allow NaNs in Warp kernels
NAN = wp.constant(-1.0e8)
@wp.kernel
def compute_contact_points(
body_q: wp.array(dtype=wp.transform),
shape_body: wp.array(dtype=int),
contact_count: wp.array(dtype=int),
contact_shape0: wp.array(dtype=int),
contact_shape1: wp.array(dtype=int),
contact_point0: wp.array(dtype=wp.vec3),
contact_point1: wp.array(dtype=wp.vec3),
# outputs
contact_pos0: wp.array(dtype=wp.vec3),
contact_pos1: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
count = contact_count[0]
if tid >= count:
contact_pos0[tid] = wp.vec3(NAN, NAN, NAN)
contact_pos1[tid] = wp.vec3(NAN, NAN, NAN)
return
shape_a = contact_shape0[tid]
shape_b = contact_shape1[tid]
if shape_a == shape_b:
contact_pos0[tid] = wp.vec3(NAN, NAN, NAN)
contact_pos1[tid] = wp.vec3(NAN, NAN, NAN)
return
body_a = shape_body[shape_a]
body_b = shape_body[shape_b]
X_wb_a = wp.transform_identity()
X_wb_b = wp.transform_identity()
if body_a >= 0:
X_wb_a = body_q[body_a]
if body_b >= 0:
X_wb_b = body_q[body_b]
contact_pos0[tid] = wp.transform_point(X_wb_a, contact_point0[tid])
contact_pos1[tid] = wp.transform_point(X_wb_b, contact_point1[tid])
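# compute_contact_points writes the sentinel value NAN (-1.0e8) into every slot past the active
# contact count (and for degenerate shape pairs) so the renderer can skip those markers. A hedged
# host-side sketch of filtering them out after copying the array back (the helper is illustrative):
def _example_active_contact_positions(contact_pos):
    points = contact_pos.numpy()
    # keep only rows whose components are not the -1.0e8 sentinel
    return points[points[:, 0] > -1.0e7]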
def CreateSimRenderer(renderer):
class SimRenderer(renderer):
use_unique_colors = True
def __init__(
self,
model: warp.sim.Model,
path,
scaling=1.0,
fps=60,
up_axis="Y",
show_rigid_contact_points=False,
contact_points_radius=1e-3,
show_joints=False,
**render_kwargs,
):
# create USD stage
super().__init__(path, scaling=scaling, fps=fps, up_axis=up_axis, **render_kwargs)
self.scaling = scaling
self.cam_axis = "XYZ".index(up_axis.upper())
self.show_rigid_contact_points = show_rigid_contact_points
self.show_joints = show_joints
self.contact_points_radius = contact_points_radius
self.populate(model)
def populate(self, model: warp.sim.Model):
self.skip_rendering = False
self.model = model
self.num_envs = model.num_envs
self.body_names = []
if self.show_rigid_contact_points and model.rigid_contact_max:
self.contact_points0 = wp.array(
np.zeros((model.rigid_contact_max, 3)), dtype=wp.vec3, device=model.device
)
self.contact_points1 = wp.array(
np.zeros((model.rigid_contact_max, 3)), dtype=wp.vec3, device=model.device
)
self.contact_points0_colors = [(1.0, 0.5, 0.0)] * model.rigid_contact_max
self.contact_points1_colors = [(0.0, 0.5, 1.0)] * model.rigid_contact_max
self.body_env = [] # mapping from body index to its environment index
env_id = 0
self.bodies_per_env = model.body_count // self.num_envs
# create rigid body nodes
for b in range(model.body_count):
body_name = f"body_{b}_{self.model.body_name[b].replace(' ', '_')}"
self.body_names.append(body_name)
self.register_body(body_name)
if b > 0 and b % self.bodies_per_env == 0:
env_id += 1
self.body_env.append(env_id)
# create rigid shape children
if self.model.shape_count:
# mapping from hash of geometry to shape ID
self.geo_shape = {}
self.instance_count = 0
self.body_name = {} # mapping from body name to its body ID
self.body_shapes = defaultdict(list) # mapping from body index to its shape IDs
shape_body = model.shape_body.numpy()
shape_geo_src = model.shape_geo_src
shape_geo_type = model.shape_geo.type.numpy()
shape_geo_scale = model.shape_geo.scale.numpy()
shape_geo_thickness = model.shape_geo.thickness.numpy()
shape_geo_is_solid = model.shape_geo.is_solid.numpy()
shape_transform = model.shape_transform.numpy()
shape_visible = model.shape_visible.numpy()
p = np.zeros(3, dtype=np.float32)
q = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)
scale = np.ones(3)
color = (1.0, 1.0, 1.0)
# loop over shapes excluding the ground plane
for s in range(model.shape_count - 1):
geo_type = shape_geo_type[s]
geo_scale = [float(v) for v in shape_geo_scale[s]]
geo_thickness = float(shape_geo_thickness[s])
geo_is_solid = bool(shape_geo_is_solid[s])
geo_src = shape_geo_src[s]
name = f"shape_{s}"
# shape transform in body frame
body = int(shape_body[s])
if body >= 0 and body < len(self.body_names):
body = self.body_names[body]
else:
body = None
if self.use_unique_colors and body is not None:
color = self._get_new_color()
# shape transform in body frame
X_bs = wp.transform_expand(shape_transform[s])
# check whether we can instance an already created shape with the same geometry
geo_hash = hash((int(geo_type), geo_src, *geo_scale, geo_thickness, geo_is_solid))
if geo_hash in self.geo_shape:
shape = self.geo_shape[geo_hash]
else:
if geo_type == warp.sim.GEO_PLANE:
if s == model.shape_count - 1 and not model.ground:
continue # hide ground plane
# plane mesh
width = geo_scale[0] if geo_scale[0] > 0.0 else 100.0
length = geo_scale[1] if geo_scale[1] > 0.0 else 100.0
shape = self.render_plane(
name, p, q, width, length, color, parent_body=body, is_template=True
)
elif geo_type == warp.sim.GEO_SPHERE:
shape = self.render_sphere(
name, p, q, geo_scale[0], parent_body=body, is_template=True, color=color
)
elif geo_type == warp.sim.GEO_CAPSULE:
shape = self.render_capsule(
name, p, q, geo_scale[0], geo_scale[1], parent_body=body, is_template=True, color=color
)
elif geo_type == warp.sim.GEO_CYLINDER:
shape = self.render_cylinder(
name, p, q, geo_scale[0], geo_scale[1], parent_body=body, is_template=True, color=color
)
elif geo_type == warp.sim.GEO_CONE:
shape = self.render_cone(
name, p, q, geo_scale[0], geo_scale[1], parent_body=body, is_template=True, color=color
)
elif geo_type == warp.sim.GEO_BOX:
shape = self.render_box(
name, p, q, geo_scale, parent_body=body, is_template=True, color=color
)
elif geo_type == warp.sim.GEO_MESH:
if not geo_is_solid:
faces, vertices = solidify_mesh(geo_src.indices, geo_src.vertices, geo_thickness)
else:
faces, vertices = geo_src.indices, geo_src.vertices
shape = self.render_mesh(
name,
vertices,
faces,
pos=p,
rot=q,
scale=geo_scale,
colors=[color],
parent_body=body,
is_template=True,
)
elif geo_type == warp.sim.GEO_SDF:
continue
self.geo_shape[geo_hash] = shape
if shape_visible[s]:
# TODO support dynamic visibility
self.add_shape_instance(
name, shape, body, X_bs.p, X_bs.q, scale, custom_index=s, visible=shape_visible[s]
)
self.instance_count += 1
if self.show_joints and model.joint_count:
joint_type = model.joint_type.numpy()
joint_axis = model.joint_axis.numpy()
joint_axis_start = model.joint_axis_start.numpy()
joint_axis_dim = model.joint_axis_dim.numpy()
joint_parent = model.joint_parent.numpy()
joint_child = model.joint_child.numpy()
joint_tf = model.joint_X_p.numpy()
shape_collision_radius = model.shape_collision_radius.numpy()
y_axis = wp.vec3(0.0, 1.0, 0.0)
color = (1.0, 0.0, 1.0)
shape = self.render_arrow(
"joint_arrow",
None,
None,
base_radius=0.01,
base_height=0.4,
cap_radius=0.02,
cap_height=0.1,
parent_body=None,
is_template=True,
color=color,
)
for i, t in enumerate(joint_type):
if t not in {
warp.sim.JOINT_REVOLUTE,
# warp.sim.JOINT_PRISMATIC,
warp.sim.JOINT_UNIVERSAL,
warp.sim.JOINT_COMPOUND,
warp.sim.JOINT_D6,
}:
continue
tf = joint_tf[i]
body = int(joint_parent[i])
# if body == -1:
# continue
num_linear_axes = int(joint_axis_dim[i][0])
num_angular_axes = int(joint_axis_dim[i][1])
# find a good scale for the arrow based on the average radius
# of the shapes attached to the joint child body
scale = np.ones(3)
child = int(joint_child[i])
if child >= 0:
radii = []
for s in model.body_shapes[child]:
radii.append(shape_collision_radius[s])
if len(radii) > 0:
scale *= np.mean(radii) * 2.0
for a in range(num_linear_axes, num_linear_axes + num_angular_axes):
index = joint_axis_start[i] + a
axis = joint_axis[index]
if np.linalg.norm(axis) < 1e-6:
continue
p = wp.vec3(tf[:3])
q = wp.quat(tf[3:])
# compute rotation between axis and y
axis = axis / np.linalg.norm(axis)
q = q * wp.quat_between_vectors(wp.vec3(axis), y_axis)
name = f"joint_{i}_{a}"
self.add_shape_instance(name, shape, body, p, q, scale, color1=color, color2=color)
self.instance_count += 1
if model.ground:
self.render_ground(plane=model.ground_plane_params)
if hasattr(self, "complete_setup"):
self.complete_setup()
def _get_new_color(self):
return tab10_color_map(self.instance_count)
def render(self, state: warp.sim.State):
"""
Updates the renderer with the given simulation state.
Args:
state (warp.sim.State): The simulation state to render.
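            Example (illustrative sketch; ``sim_time`` is assumed to be tracked by the caller):
            .. code-block:: python
                renderer.begin_frame(sim_time)
                renderer.render(state)
                renderer.end_frame()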
"""
if self.skip_rendering:
return
if self.model.particle_count:
particle_q = state.particle_q.numpy()
# render particles
self.render_points(
"particles", particle_q, radius=self.model.particle_radius.numpy(), colors=(0.8, 0.3, 0.2)
)
# render tris
if self.model.tri_count:
self.render_mesh(
"surface",
particle_q,
self.model.tri_indices.numpy().flatten(),
colors=(((0.75, 0.25, 0.0),) * len(particle_q)),
)
# render springs
if self.model.spring_count:
self.render_line_list(
"springs", particle_q, self.model.spring_indices.numpy().flatten(), (0.25, 0.5, 0.25), 0.02
)
# render muscles
if self.model.muscle_count:
body_q = state.body_q.numpy()
muscle_start = self.model.muscle_start.numpy()
muscle_links = self.model.muscle_bodies.numpy()
muscle_points = self.model.muscle_points.numpy()
muscle_activation = self.model.muscle_activation.numpy()
# for s in self.skeletons:
# # for mesh, link in s.mesh_map.items():
# # if link != -1:
# # X_sc = wp.transform_expand(self.state.body_X_sc[link].tolist())
# # #self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
# # self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
for m in range(self.model.muscle_count):
start = int(muscle_start[m])
end = int(muscle_start[m + 1])
points = []
for w in range(start, end):
link = muscle_links[w]
point = muscle_points[w]
X_sc = wp.transform_expand(body_q[link][0])
points.append(wp.transform_point(X_sc, point).tolist())
self.render_line_strip(
name=f"muscle_{m}", vertices=points, radius=0.0075, color=(muscle_activation[m], 0.2, 0.5)
)
# update bodies
if self.model.body_count:
self.update_body_transforms(state.body_q)
if self.show_rigid_contact_points and self.model.rigid_contact_max:
wp.launch(
kernel=compute_contact_points,
dim=self.model.rigid_contact_max,
inputs=[
state.body_q,
self.model.shape_body,
self.model.rigid_contact_count,
self.model.rigid_contact_shape0,
self.model.rigid_contact_shape1,
self.model.rigid_contact_point0,
self.model.rigid_contact_point1,
],
outputs=[
self.contact_points0,
self.contact_points1,
],
device=self.model.device,
)
self.render_points(
"contact_points0",
self.contact_points0.numpy(),
radius=self.contact_points_radius * self.scaling,
colors=self.contact_points0_colors,
)
self.render_points(
"contact_points1",
self.contact_points1.numpy(),
radius=self.contact_points_radius * self.scaling,
colors=self.contact_points1_colors,
)
return SimRenderer
SimRendererUsd = CreateSimRenderer(wp.render.UsdRenderer)
SimRendererOpenGL = CreateSimRenderer(wp.render.OpenGLRenderer)
SimRenderer = SimRendererUsd
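# Example usage (illustrative sketch; the stage path, frame count, and frame rate are arbitrary):
#
#   renderer = SimRendererUsd(model, "output.usd", fps=60, show_rigid_contact_points=True)
#   for frame in range(num_frames):
#       renderer.begin_frame(frame / 60.0)
#       renderer.render(state)
#       renderer.end_frame()
#   renderer.save()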
| 17,794 | Python | 41.57177 | 128 | 0.462796 |
NVIDIA/warp/warp/sim/utils.py | from typing import List, Tuple
import numpy as np
import warp as wp
@wp.func
def velocity_at_point(qd: wp.spatial_vector, r: wp.vec3):
"""
Returns the velocity of a point relative to the frame with the given spatial velocity.
Args:
qd (spatial_vector): The spatial velocity of the frame.
r (vec3): The position of the point relative to the frame.
Returns:
vec3: The velocity of the point.
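    Example (illustrative sketch; callable only from Warp kernels or functions):
    .. code-block:: python
        @wp.kernel
        def point_velocities(body_qd: wp.array(dtype=wp.spatial_vector),
                             offsets: wp.array(dtype=wp.vec3),
                             out: wp.array(dtype=wp.vec3)):
            tid = wp.tid()
            out[tid] = velocity_at_point(body_qd[tid], offsets[tid])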
"""
return wp.cross(wp.spatial_top(qd), r) + wp.spatial_bottom(qd)
@wp.func
def quat_twist(axis: wp.vec3, q: wp.quat):
"""
Returns the twist around an axis.
"""
# project imaginary part onto axis
a = wp.vec3(q[0], q[1], q[2])
proj = wp.dot(a, axis)
a = proj * axis
# if proj < 0.0:
# # ensure twist points in same direction as axis
# a = -a
return wp.normalize(wp.quat(a[0], a[1], a[2], q[3]))
@wp.func
def quat_twist_angle(axis: wp.vec3, q: wp.quat):
"""
Returns the angle of the twist around an axis.
"""
return 2.0 * wp.acos(quat_twist(axis, q)[3])
@wp.func
def quat_decompose(q: wp.quat):
"""
Decompose a quaternion into a sequence of 3 rotations around x,y',z' respectively, i.e.: q = q_z''q_y'q_x.
"""
R = wp.mat33(
wp.quat_rotate(q, wp.vec3(1.0, 0.0, 0.0)),
wp.quat_rotate(q, wp.vec3(0.0, 1.0, 0.0)),
wp.quat_rotate(q, wp.vec3(0.0, 0.0, 1.0)),
)
# https://www.sedris.org/wg8home/Documents/WG80485.pdf
phi = wp.atan2(R[1, 2], R[2, 2])
sinp = -R[0, 2]
if wp.abs(sinp) >= 1.0:
theta = wp.HALF_PI * wp.sign(sinp)
else:
theta = wp.asin(-R[0, 2])
psi = wp.atan2(R[0, 1], R[0, 0])
return -wp.vec3(phi, theta, psi)
@wp.func
def quat_to_rpy(q: wp.quat):
"""
Convert a quaternion into Euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
x = q[0]
y = q[1]
z = q[2]
w = q[3]
t0 = 2.0 * (w * x + y * z)
t1 = 1.0 - 2.0 * (x * x + y * y)
roll_x = wp.atan2(t0, t1)
t2 = 2.0 * (w * y - z * x)
t2 = wp.clamp(t2, -1.0, 1.0)
pitch_y = wp.asin(t2)
t3 = 2.0 * (w * z + x * y)
t4 = 1.0 - 2.0 * (y * y + z * z)
yaw_z = wp.atan2(t3, t4)
return wp.vec3(roll_x, pitch_y, yaw_z)
@wp.func
def quat_to_euler(q: wp.quat, i: int, j: int, k: int) -> wp.vec3:
"""
Convert a quaternion into Euler angles.
:math:`i, j, k` are the indices in :math:`[0, 1, 2]` of the axes to use
(:math:`i \\neq j, j \\neq k`).
Reference: https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0276302
Args:
q (quat): The quaternion to convert
i (int): The index of the first axis
j (int): The index of the second axis
k (int): The index of the third axis
Returns:
vec3: The Euler angles (in radians)
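    Example (illustrative sketch; callable only from Warp kernels or functions):
    .. code-block:: python
        # extract Euler angles for the XYZ axis sequence from a quaternion q
        e = quat_to_euler(q, 0, 1, 2)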
"""
# i, j, k are actually assumed to follow 1-based indexing but
# we want to be compatible with quat_from_euler
i += 1
j += 1
k += 1
not_proper = True
if i == k:
not_proper = False
k = 6 - i - j # because i + j + k = 1 + 2 + 3 = 6
e = float((i - j) * (j - k) * (k - i)) / 2.0 # Levi-Civita symbol
a = q[0]
b = q[i]
c = q[j]
d = q[k] * e
if not_proper:
a -= q[j]
b += q[k] * e
c += q[0]
d -= q[i]
t2 = wp.acos(2.0 * (a * a + b * b) / (a * a + b * b + c * c + d * d) - 1.0)
tp = wp.atan2(b, a)
tm = wp.atan2(d, c)
t1 = 0.0
t3 = 0.0
if wp.abs(t2) < 1e-6:
t3 = 2.0 * tp - t1
elif wp.abs(t2 - wp.HALF_PI) < 1e-6:
t3 = 2.0 * tm + t1
else:
t1 = tp - tm
t3 = tp + tm
if not_proper:
t2 -= wp.HALF_PI
t3 *= e
return wp.vec3(t1, t2, t3)
@wp.func
def quat_from_euler(e: wp.vec3, i: int, j: int, k: int) -> wp.quat:
"""
Convert Euler angles to a quaternion.
:math:`i, j, k` are the indices in :math:`[0, 1, 2]` of the axes in which the Euler angles are provided
(:math:`i \\neq j, j \\neq k`), e.g. (0, 1, 2) for Euler sequence XYZ.
Args:
e (vec3): The Euler angles (in radians)
i (int): The index of the first axis
j (int): The index of the second axis
k (int): The index of the third axis
Returns:
quat: The quaternion
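    Example (illustrative sketch; callable only from Warp kernels or functions):
    .. code-block:: python
        # build a quaternion from roll/pitch/yaw angles using the XYZ sequence
        q = quat_from_euler(wp.vec3(roll, pitch, yaw), 0, 1, 2)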
"""
# Half angles
half_e = e / 2.0
# Precompute sines and cosines of half angles
cr = wp.cos(half_e[i])
sr = wp.sin(half_e[i])
cp = wp.cos(half_e[j])
sp = wp.sin(half_e[j])
cy = wp.cos(half_e[k])
sy = wp.sin(half_e[k])
# Components of the quaternion based on the rotation sequence
return wp.quat(
(cy * sr * cp - sy * cr * sp),
(cy * cr * sp + sy * sr * cp),
(sy * cr * cp - cy * sr * sp),
(cy * cr * cp + sy * sr * sp),
)
@wp.func
def transform_twist(t: wp.transform, x: wp.spatial_vector):
# Frank & Park definition 3.20, pg 100
q = wp.transform_get_rotation(t)
p = wp.transform_get_translation(t)
w = wp.spatial_top(x)
v = wp.spatial_bottom(x)
w = wp.quat_rotate(q, w)
v = wp.quat_rotate(q, v) + wp.cross(p, w)
return wp.spatial_vector(w, v)
@wp.func
def transform_wrench(t: wp.transform, x: wp.spatial_vector):
q = wp.transform_get_rotation(t)
p = wp.transform_get_translation(t)
w = wp.spatial_top(x)
v = wp.spatial_bottom(x)
v = wp.quat_rotate(q, v)
w = wp.quat_rotate(q, w) + wp.cross(p, v)
return wp.spatial_vector(w, v)
@wp.func
def transform_inertia(t: wp.transform, I: wp.spatial_matrix):
"""
Computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates).
(Frank & Park, section 8.2.3, pg 290)
"""
t_inv = wp.transform_inverse(t)
q = wp.transform_get_rotation(t_inv)
p = wp.transform_get_translation(t_inv)
r1 = wp.quat_rotate(q, wp.vec3(1.0, 0.0, 0.0))
r2 = wp.quat_rotate(q, wp.vec3(0.0, 1.0, 0.0))
r3 = wp.quat_rotate(q, wp.vec3(0.0, 0.0, 1.0))
R = wp.mat33(r1, r2, r3)
S = wp.mul(wp.skew(p), R)
T = wp.spatial_adjoint(R, S)
return wp.mul(wp.mul(wp.transpose(T), I), T)
@wp.func
def boltzmann(a: float, b: float, alpha: float):
e1 = wp.exp(alpha * a)
e2 = wp.exp(alpha * b)
return (a * e1 + b * e2) / (e1 + e2)
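# boltzmann() above and smooth_max()/smooth_min() below are smooth (differentiable)
# approximations of max/min: boltzmann() is a softmax-weighted average whose sharpness
# increases with alpha, while smooth_max()/smooth_min() replace the |a - b| kink with
# sqrt((a - b)^2 + eps), e.g. smooth_max(1.0, 0.0, eps=1e-4) ~= 1.000025 instead of 1.0.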
@wp.func
def smooth_max(a: float, b: float, eps: float):
d = a - b
return 0.5 * (a + b + wp.sqrt(d * d + eps))
@wp.func
def smooth_min(a: float, b: float, eps: float):
d = a - b
return 0.5 * (a + b - wp.sqrt(d * d + eps))
@wp.func
def leaky_max(a: float, b: float):
return smooth_max(a, b, 1e-5)
@wp.func
def leaky_min(a: float, b: float):
return smooth_min(a, b, 1e-5)
@wp.func
def vec_min(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.min(a[0], b[0]), wp.min(a[1], b[1]), wp.min(a[2], b[2]))
@wp.func
def vec_max(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.max(a[0], b[0]), wp.max(a[1], b[1]), wp.max(a[2], b[2]))
@wp.func
def vec_leaky_min(a: wp.vec3, b: wp.vec3):
return wp.vec3(leaky_min(a[0], b[0]), leaky_min(a[1], b[1]), leaky_min(a[2], b[2]))
@wp.func
def vec_leaky_max(a: wp.vec3, b: wp.vec3):
return wp.vec3(leaky_max(a[0], b[0]), leaky_max(a[1], b[1]), leaky_max(a[2], b[2]))
@wp.func
def vec_abs(a: wp.vec3):
return wp.vec3(wp.abs(a[0]), wp.abs(a[1]), wp.abs(a[2]))
def load_mesh(filename: str, method: str = None):
"""
Loads a 3D triangular surface mesh from a file.
Args:
filename (str): The path to the 3D model file (obj, and other formats supported by the different methods) to load.
method (str): The method to use for loading the mesh (default None). Can be either `"trimesh"`, `"meshio"`, `"pcu"`, or `"openmesh"`. If None, every method is tried and the first successful mesh import where the number of vertices is greater than 0 is returned.
Returns:
Tuple of (mesh_points, mesh_indices), where mesh_points is a Nx3 numpy array of vertex positions (float32),
        and mesh_indices is a numpy array of vertex indices (int32) for the triangular faces (flattened or of shape Mx3, depending on the loading method).
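    Example (illustrative sketch; the file path is arbitrary):
    .. code-block:: python
        points, indices = load_mesh("assets/bunny.obj", method="trimesh")
        mesh = wp.sim.Mesh(points, indices)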
"""
import os
if not os.path.exists(filename):
raise ValueError(f"File not found: {filename}")
def load_mesh_with_method(method):
if method == "meshio":
import meshio
m = meshio.read(filename)
mesh_points = np.array(m.points)
mesh_indices = np.array(m.cells[0].data, dtype=np.int32)
elif method == "openmesh":
import openmesh
m = openmesh.read_trimesh(filename)
mesh_points = np.array(m.points())
mesh_indices = np.array(m.face_vertex_indices(), dtype=np.int32)
elif method == "pcu":
import point_cloud_utils as pcu
mesh_points, mesh_indices = pcu.load_mesh_vf(filename)
mesh_indices = mesh_indices.flatten()
else:
import trimesh
m = trimesh.load(filename)
if hasattr(m, "geometry"):
# multiple meshes are contained in a scene; combine to one mesh
mesh_points = []
mesh_indices = []
index_offset = 0
for geom in m.geometry.values():
vertices = np.array(geom.vertices, dtype=np.float32)
faces = np.array(geom.faces.flatten(), dtype=np.int32)
mesh_points.append(vertices)
mesh_indices.append(faces + index_offset)
index_offset += len(vertices)
mesh_points = np.concatenate(mesh_points, axis=0)
mesh_indices = np.concatenate(mesh_indices)
else:
# a single mesh
mesh_points = np.array(m.vertices, dtype=np.float32)
mesh_indices = np.array(m.faces.flatten(), dtype=np.int32)
return mesh_points, mesh_indices
if method is None:
methods = ["trimesh", "meshio", "pcu", "openmesh"]
for method in methods:
try:
mesh = load_mesh_with_method(method)
if mesh is not None and len(mesh[0]) > 0:
return mesh
except Exception:
pass
raise ValueError(f"Failed to load mesh using any of the methods: {methods}")
else:
mesh = load_mesh_with_method(method)
if mesh is None or len(mesh[0]) == 0:
raise ValueError(f"Failed to load mesh using method {method}")
return mesh
def visualize_meshes(
meshes: List[Tuple[list, list]], num_cols=0, num_rows=0, titles=None, scale_axes=True, show_plot=True
):
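    """Renders a list of meshes in a grid of 3D matplotlib subplots.
    Args:
        meshes: List of (vertices, faces) tuples, where vertices is a flat or Nx3 list of vertex positions and faces is a flat or Mx3 list of triangle vertex indices
        num_cols: Number of subplot columns (0 = determine automatically)
        num_rows: Number of subplot rows (0 = determine automatically)
        titles: Optional list of subplot titles
        scale_axes: Whether to use the same axis extents for all subplots
        show_plot: Whether to call ``plt.show()`` before returning
    Returns:
        The matplotlib figure
    """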
# render meshes in a grid with matplotlib
import matplotlib.pyplot as plt
if titles is None:
titles = []
num_cols = min(num_cols, len(meshes))
num_rows = min(num_rows, len(meshes))
if num_cols and not num_rows:
num_rows = int(np.ceil(len(meshes) / num_cols))
elif num_rows and not num_cols:
num_cols = int(np.ceil(len(meshes) / num_rows))
else:
num_cols = len(meshes)
num_rows = 1
vertices = [np.array(v).reshape((-1, 3)) for v, _ in meshes]
faces = [np.array(f, dtype=np.int32).reshape((-1, 3)) for _, f in meshes]
if scale_axes:
ranges = np.array([v.max(axis=0) - v.min(axis=0) for v in vertices])
max_range = ranges.max()
mid_points = np.array([v.max(axis=0) + v.min(axis=0) for v in vertices]) * 0.5
fig = plt.figure(figsize=(12, 6))
    # iterate over the converted vertex/face arrays rather than the raw input lists
    for i, (verts, tris) in enumerate(zip(vertices, faces)):
        ax = fig.add_subplot(num_rows, num_cols, i + 1, projection="3d")
        if i < len(titles):
            ax.set_title(titles[i])
        ax.plot_trisurf(verts[:, 0], verts[:, 1], verts[:, 2], triangles=tris, edgecolor="k")
if scale_axes:
mid = mid_points[i]
ax.set_xlim(mid[0] - max_range, mid[0] + max_range)
ax.set_ylim(mid[1] - max_range, mid[1] + max_range)
ax.set_zlim(mid[2] - max_range, mid[2] + max_range)
if show_plot:
plt.show()
return fig
| 12,194 | Python | 28.456522 | 269 | 0.556175 |
NVIDIA/warp/warp/sim/model.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A module for building simulation models and state."""
import copy
import math
from typing import List, Optional, Tuple
import numpy as np
import warp as wp
from .inertia import (
compute_box_inertia,
compute_capsule_inertia,
compute_cone_inertia,
compute_cylinder_inertia,
compute_mesh_inertia,
compute_sphere_inertia,
transform_inertia,
)
Vec3 = List[float]
Vec4 = List[float]
Quat = List[float]
Mat33 = List[float]
Transform = Tuple[Vec3, Quat]
# Particle flags
PARTICLE_FLAG_ACTIVE = wp.constant(wp.uint32(1 << 0))
# Shape geometry types
GEO_SPHERE = wp.constant(0)
GEO_BOX = wp.constant(1)
GEO_CAPSULE = wp.constant(2)
GEO_CYLINDER = wp.constant(3)
GEO_CONE = wp.constant(4)
GEO_MESH = wp.constant(5)
GEO_SDF = wp.constant(6)
GEO_PLANE = wp.constant(7)
GEO_NONE = wp.constant(8)
# Types of joints linking rigid bodies
JOINT_PRISMATIC = wp.constant(0)
JOINT_REVOLUTE = wp.constant(1)
JOINT_BALL = wp.constant(2)
JOINT_FIXED = wp.constant(3)
JOINT_FREE = wp.constant(4)
JOINT_COMPOUND = wp.constant(5)
JOINT_UNIVERSAL = wp.constant(6)
JOINT_DISTANCE = wp.constant(7)
JOINT_D6 = wp.constant(8)
# Joint axis control mode types
JOINT_MODE_FORCE = wp.constant(0)
JOINT_MODE_TARGET_POSITION = wp.constant(1)
JOINT_MODE_TARGET_VELOCITY = wp.constant(2)
def flag_to_int(flag):
"""Converts a flag to an integer."""
if type(flag) in wp.types.int_types:
return flag.value
return int(flag)
# Material properties pertaining to rigid shape contact dynamics
@wp.struct
class ModelShapeMaterials:
ke: wp.array(dtype=float) # The contact elastic stiffness (only used by the Euler integrators)
kd: wp.array(dtype=float) # The contact damping stiffness (only used by the Euler integrators)
kf: wp.array(dtype=float) # The contact friction stiffness (only used by the Euler integrators)
ka: wp.array(
dtype=float
) # The contact adhesion distance (values greater than 0 mean adhesive contact; only used by the Euler integrators)
mu: wp.array(dtype=float) # The coefficient of friction
restitution: wp.array(dtype=float) # The coefficient of restitution (only used by XPBD integrator)
# Shape properties of geometry
@wp.struct
class ModelShapeGeometry:
type: wp.array(dtype=wp.int32) # The type of geometry (GEO_SPHERE, GEO_BOX, etc.)
is_solid: wp.array(dtype=wp.uint8) # Indicates whether the shape is solid or hollow
thickness: wp.array(
dtype=float
) # The thickness of the shape (used for collision detection, and inertia computation of hollow shapes)
source: wp.array(dtype=wp.uint64) # Pointer to the source geometry (in case of a mesh, zero otherwise)
scale: wp.array(dtype=wp.vec3) # The 3D scale of the shape
# Axis (linear or angular) of a joint that can have bounds and be driven towards a target
class JointAxis:
"""
Describes a joint axis that can have limits and be driven towards a target.
Attributes:
axis (3D vector or JointAxis): The 3D axis that this JointAxis object describes, or alternatively another JointAxis object to copy from
limit_lower (float): The lower position limit of the joint axis
limit_upper (float): The upper position limit of the joint axis
limit_ke (float): The elastic stiffness of the joint axis limits, only respected by :class:`SemiImplicitIntegrator` and :class:`FeatherstoneIntegrator`
limit_kd (float): The damping stiffness of the joint axis limits, only respected by :class:`SemiImplicitIntegrator` and :class:`FeatherstoneIntegrator`
action (float): The force applied by default to this joint axis, or the target position or velocity (depending on the mode, see `Joint modes`_) of the joint axis
target_ke (float): The proportional gain of the joint axis target drive PD controller
target_kd (float): The derivative gain of the joint axis target drive PD controller
mode (int): The mode of the joint axis, see `Joint modes`_
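    Example (illustrative sketch; the limits and gains are arbitrary):
    .. code-block:: python
        axis = wp.sim.JointAxis(
            (0.0, 1.0, 0.0),
            limit_lower=-0.5,
            limit_upper=0.5,
            target_ke=100.0,
            target_kd=10.0,
            mode=wp.sim.JOINT_MODE_TARGET_POSITION,
        )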
"""
def __init__(
self,
axis,
limit_lower=-math.inf,
limit_upper=math.inf,
limit_ke=100.0,
limit_kd=10.0,
action=None,
target_ke=0.0,
target_kd=0.0,
mode=JOINT_MODE_FORCE,
):
if isinstance(axis, JointAxis):
self.axis = axis.axis
self.limit_lower = axis.limit_lower
self.limit_upper = axis.limit_upper
self.limit_ke = axis.limit_ke
self.limit_kd = axis.limit_kd
self.action = axis.action
self.target_ke = axis.target_ke
self.target_kd = axis.target_kd
self.mode = axis.mode
else:
self.axis = wp.normalize(wp.vec3(axis))
self.limit_lower = limit_lower
self.limit_upper = limit_upper
self.limit_ke = limit_ke
self.limit_kd = limit_kd
if action is not None:
self.action = action
elif mode == JOINT_MODE_TARGET_POSITION and (limit_lower > 0.0 or limit_upper < 0.0):
self.action = 0.5 * (limit_lower + limit_upper)
else:
self.action = 0.0
self.target_ke = target_ke
self.target_kd = target_kd
self.mode = mode
class SDF:
"""Describes a signed distance field for simulation
Attributes:
volume (Volume): The volume defining the SDF
I (Mat33): 3x3 inertia matrix of the SDF
mass (float): The total mass of the SDF
com (Vec3): The center of mass of the SDF
"""
def __init__(self, volume=None, I=None, mass=1.0, com=None):
self.volume = volume
self.I = I if I is not None else wp.mat33(np.eye(3))
self.mass = mass
self.com = com if com is not None else wp.vec3()
# Need to specify these for now
self.has_inertia = True
self.is_solid = True
def finalize(self, device=None):
return self.volume.id
def __hash__(self):
return hash((self.volume.id))
class Mesh:
"""Describes a triangle collision mesh for simulation
Example mesh creation from a triangle OBJ mesh file:
====================================================
See :func:`load_mesh` which is provided as a utility function.
.. code-block:: python
import numpy as np
import warp as wp
import warp.sim
import openmesh
m = openmesh.read_trimesh("mesh.obj")
mesh_points = np.array(m.points())
mesh_indices = np.array(m.face_vertex_indices(), dtype=np.int32).flatten()
mesh = wp.sim.Mesh(mesh_points, mesh_indices)
Attributes:
vertices (List[Vec3]): Mesh 3D vertices points
indices (List[int]): Mesh indices as a flattened list of vertex indices describing triangles
I (Mat33): 3x3 inertia matrix of the mesh assuming density of 1.0 (around the center of mass)
mass (float): The total mass of the body assuming density of 1.0
com (Vec3): The center of mass of the body
"""
def __init__(self, vertices: List[Vec3], indices: List[int], compute_inertia=True, is_solid=True):
"""Construct a Mesh object from a triangle mesh
The mesh center of mass and inertia tensor will automatically be
calculated using a density of 1.0. This computation is only valid
if the mesh is closed (two-manifold).
Args:
vertices: List of vertices in the mesh
indices: List of triangle indices, 3 per-element
compute_inertia: If True, the mass, inertia tensor and center of mass will be computed assuming density of 1.0
is_solid: If True, the mesh is assumed to be a solid during inertia computation, otherwise it is assumed to be a hollow surface
"""
self.vertices = np.array(vertices).reshape(-1, 3)
self.indices = np.array(indices, dtype=np.int32).flatten()
self.is_solid = is_solid
self.has_inertia = compute_inertia
if compute_inertia:
self.mass, self.com, self.I, _ = compute_mesh_inertia(1.0, vertices, indices, is_solid=is_solid)
else:
self.I = wp.mat33(np.eye(3))
self.mass = 1.0
self.com = wp.vec3()
# construct simulation ready buffers from points
def finalize(self, device=None):
"""
Constructs a simulation-ready :class:`Mesh` object from the mesh data and returns its ID.
Args:
device: The device on which to allocate the mesh buffers
Returns:
The ID of the simulation-ready :class:`Mesh`
"""
with wp.ScopedDevice(device):
pos = wp.array(self.vertices, dtype=wp.vec3)
vel = wp.zeros_like(pos)
indices = wp.array(self.indices, dtype=wp.int32)
self.mesh = wp.Mesh(points=pos, velocities=vel, indices=indices)
return self.mesh.id
def __hash__(self):
"""
Computes a hash of the mesh data for use in caching. The hash considers the mesh vertices, indices, and whether the mesh is solid or not.
"""
return hash((tuple(np.array(self.vertices).flatten()), tuple(np.array(self.indices).flatten()), self.is_solid))
class State:
"""The State object holds all *time-varying* data for a model.
Time-varying data includes particle positions, velocities, rigid body states, and
anything that is output from the integrator as derived data, e.g.: forces.
The exact attributes depend on the contents of the model. State objects should
generally be created using the :func:`Model.state()` function.
Attributes:
particle_q (array): Array of 3D particle positions, shape [particle_count], :class:`vec3`
particle_qd (array): Array of 3D particle velocities, shape [particle_count], :class:`vec3`
particle_f (array): Array of 3D particle forces, shape [particle_count], :class:`vec3`
body_q (array): Array of body coordinates (7-dof transforms) in maximal coordinates, shape [body_count], :class:`transform`
body_qd (array): Array of body velocities in maximal coordinates (first 3 entries represent angular velocity, last 3 entries represent linear velocity), shape [body_count], :class:`spatial_vector`
body_f (array): Array of body forces in maximal coordinates (first 3 entries represent torque, last 3 entries represent linear force), shape [body_count], :class:`spatial_vector`
Note:
:attr:`body_f` represents external wrenches in world frame and denotes wrenches measured w.r.t. to the body's center of mass for all integrators except :class:`FeatherstoneIntegrator` which assumes the wrenches are measured w.r.t. world origin.
joint_q (array): Array of generalized joint coordinates, shape [joint_coord_count], float
joint_qd (array): Array of generalized joint velocities, shape [joint_dof_count], float
"""
def __init__(self):
self.particle_q = None
self.particle_qd = None
self.particle_f = None
self.body_q = None
self.body_qd = None
self.body_f = None
self.joint_q = None
self.joint_qd = None
def clear_forces(self):
"""Clears all forces (for particles and bodies) in the state object."""
with wp.ScopedTimer("clear_forces", False):
if self.particle_count:
self.particle_f.zero_()
if self.body_count:
self.body_f.zero_()
@property
def requires_grad(self):
"""Indicates whether the state arrays have gradient computation enabled."""
if self.particle_q:
return self.particle_q.requires_grad
if self.body_q:
return self.body_q.requires_grad
return False
@property
def body_count(self):
"""The number of bodies represented in the state."""
if self.body_q is None:
return 0
return len(self.body_q)
@property
def particle_count(self):
"""The number of particles represented in the state."""
if self.particle_q is None:
return 0
return len(self.particle_q)
@property
def joint_coord_count(self):
"""The number of generalized joint position coordinates represented in the state."""
if self.joint_q is None:
return 0
return len(self.joint_q)
@property
def joint_dof_count(self):
"""The number of generalized joint velocity coordinates represented in the state."""
if self.joint_qd is None:
return 0
return len(self.joint_qd)
class Control:
"""
The Control object holds all *time-varying* control data for a model.
Time-varying control data includes joint control inputs, muscle activations, and activation forces for triangle and tetrahedral elements.
The exact attributes depend on the contents of the model. Control objects should generally be created using the :func:`Model.control()` function.
Attributes:
joint_act (array): Array of joint control inputs, shape [joint_axis_count], float
tri_activations (array): Array of triangle element activations, shape [tri_count], float
tet_activations (array): Array of tetrahedral element activations, shape [tet_count], float
muscle_activations (array): Array of muscle activations, shape [muscle_count], float
"""
def __init__(self, model):
"""
Args:
model (Model): The model to use as a reference for the control inputs
"""
self.model = model
self.joint_act = None
self.tri_activations = None
self.tet_activations = None
self.muscle_activations = None
def reset(self):
"""
Resets the control inputs to their initial state defined in :class:`Model`.
"""
if self.joint_act is not None:
self.joint_act.assign(self.model.joint_act)
if self.tri_activations is not None:
self.tri_activations.assign(self.model.tri_activations)
if self.tet_activations is not None:
self.tet_activations.assign(self.model.tet_activations)
if self.muscle_activations is not None:
self.muscle_activations.assign(self.model.muscle_activations)
def compute_shape_mass(type, scale, src, density, is_solid, thickness):
"""Computes the mass, center of mass and 3x3 inertia tensor of a shape
Args:
type: The type of shape (GEO_SPHERE, GEO_BOX, etc.)
scale: The scale of the shape
src: The source shape (Mesh or SDF)
density: The density of the shape
is_solid: Whether the shape is solid or hollow
thickness: The thickness of the shape (used for collision detection, and inertia computation of hollow shapes)
Returns:
The mass, center of mass and 3x3 inertia tensor of the shape
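    Example (illustrative sketch; a solid sphere of radius 0.5 and density 1000):
    .. code-block:: python
        m, com, I = compute_shape_mass(GEO_SPHERE, (0.5, 0.0, 0.0), None, 1000.0, True, 0.0)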
"""
if density == 0.0 or type == GEO_PLANE: # zero density means fixed
return 0.0, wp.vec3(), wp.mat33()
if type == GEO_SPHERE:
solid = compute_sphere_inertia(density, scale[0])
if is_solid:
return solid
else:
hollow = compute_sphere_inertia(density, scale[0] - thickness)
return solid[0] - hollow[0], solid[1], solid[2] - hollow[2]
elif type == GEO_BOX:
w, h, d = scale * 2.0
solid = compute_box_inertia(density, w, h, d)
if is_solid:
return solid
else:
hollow = compute_box_inertia(density, w - thickness, h - thickness, d - thickness)
return solid[0] - hollow[0], solid[1], solid[2] - hollow[2]
elif type == GEO_CAPSULE:
r, h = scale[0], scale[1] * 2.0
solid = compute_capsule_inertia(density, r, h)
if is_solid:
return solid
else:
hollow = compute_capsule_inertia(density, r - thickness, h - 2.0 * thickness)
return solid[0] - hollow[0], solid[1], solid[2] - hollow[2]
elif type == GEO_CYLINDER:
r, h = scale[0], scale[1] * 2.0
solid = compute_cylinder_inertia(density, r, h)
if is_solid:
return solid
else:
hollow = compute_cylinder_inertia(density, r - thickness, h - 2.0 * thickness)
return solid[0] - hollow[0], solid[1], solid[2] - hollow[2]
elif type == GEO_CONE:
r, h = scale[0], scale[1] * 2.0
solid = compute_cone_inertia(density, r, h)
if is_solid:
return solid
else:
hollow = compute_cone_inertia(density, r - thickness, h - 2.0 * thickness)
return solid[0] - hollow[0], solid[1], solid[2] - hollow[2]
elif type == GEO_MESH or type == GEO_SDF:
if src.has_inertia and src.mass > 0.0 and src.is_solid == is_solid:
m, c, I = src.mass, src.com, src.I
sx, sy, sz = scale
mass_ratio = sx * sy * sz * density
m_new = m * mass_ratio
c_new = wp.cw_mul(c, scale)
Ixx = I[0, 0] * (sy**2 + sz**2) / 2 * mass_ratio
Iyy = I[1, 1] * (sx**2 + sz**2) / 2 * mass_ratio
Izz = I[2, 2] * (sx**2 + sy**2) / 2 * mass_ratio
Ixy = I[0, 1] * sx * sy * mass_ratio
Ixz = I[0, 2] * sx * sz * mass_ratio
Iyz = I[1, 2] * sy * sz * mass_ratio
I_new = wp.mat33([[Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]])
return m_new, c_new, I_new
elif type == GEO_MESH:
# fall back to computing inertia from mesh geometry
vertices = np.array(src.vertices) * np.array(scale)
m, c, I, vol = compute_mesh_inertia(density, vertices, src.indices, is_solid, thickness)
return m, c, I
raise ValueError("Unsupported shape type: {}".format(type))
class Model:
"""Holds the definition of the simulation model
This class holds the non-time varying description of the system, i.e.:
all geometry, constraints, and parameters used to describe the simulation.
Attributes:
requires_grad (float): Indicates whether the model was finalized (see :meth:`ModelBuilder.finalize`) with gradient computation enabled
num_envs (int): Number of articulation environments that were added to the ModelBuilder via `add_builder`
particle_q (array): Particle positions, shape [particle_count, 3], float
particle_qd (array): Particle velocities, shape [particle_count, 3], float
particle_mass (array): Particle mass, shape [particle_count], float
particle_inv_mass (array): Particle inverse mass, shape [particle_count], float
particle_radius (array): Particle radius, shape [particle_count], float
particle_max_radius (float): Maximum particle radius (useful for HashGrid construction)
particle_ke (array): Particle normal contact stiffness (used by :class:`SemiImplicitIntegrator`), shape [particle_count], float
particle_kd (array): Particle normal contact damping (used by :class:`SemiImplicitIntegrator`), shape [particle_count], float
particle_kf (array): Particle friction force stiffness (used by :class:`SemiImplicitIntegrator`), shape [particle_count], float
particle_mu (array): Particle friction coefficient, shape [particle_count], float
particle_cohesion (array): Particle cohesion strength, shape [particle_count], float
particle_adhesion (array): Particle adhesion strength, shape [particle_count], float
particle_grid (HashGrid): HashGrid instance used for accelerated simulation of particle interactions
particle_flags (array): Particle enabled state, shape [particle_count], bool
particle_max_velocity (float): Maximum particle velocity (to prevent instability)
shape_transform (array): Rigid shape transforms, shape [shape_count, 7], float
shape_visible (array): Rigid shape visibility, shape [shape_count], bool
shape_body (array): Rigid shape body index, shape [shape_count], int
body_shapes (dict): Mapping from body index to list of attached shape indices
shape_materials (ModelShapeMaterials): Rigid shape contact materials, shape [shape_count], float
        shape_geo (ModelShapeGeometry): Rigid shape geometry properties (geo type, scale, thickness, etc.), with arrays of shape [shape_count]
shape_geo_src (list): List of `wp.Mesh` instances used for rendering of mesh geometry
shape_collision_group (list): Collision group of each shape, shape [shape_count], int
shape_collision_group_map (dict): Mapping from collision group to list of shape indices
shape_collision_filter_pairs (set): Pairs of shape indices that should not collide
shape_collision_radius (array): Collision radius of each shape used for bounding sphere broadphase collision checking, shape [shape_count], float
shape_ground_collision (list): Indicates whether each shape should collide with the ground, shape [shape_count], bool
shape_shape_collision (list): Indicates whether each shape should collide with any other shape, shape [shape_count], bool
shape_contact_pairs (array): Pairs of shape indices that may collide, shape [contact_pair_count, 2], int
shape_ground_contact_pairs (array): Pairs of shape, ground indices that may collide, shape [ground_contact_pair_count, 2], int
spring_indices (array): Particle spring indices, shape [spring_count*2], int
spring_rest_length (array): Particle spring rest length, shape [spring_count], float
spring_stiffness (array): Particle spring stiffness, shape [spring_count], float
spring_damping (array): Particle spring damping, shape [spring_count], float
spring_control (array): Particle spring activation, shape [spring_count], float
tri_indices (array): Triangle element indices, shape [tri_count*3], int
tri_poses (array): Triangle element rest pose, shape [tri_count, 2, 2], float
tri_activations (array): Triangle element activations, shape [tri_count], float
tri_materials (array): Triangle element materials, shape [tri_count, 5], float
edge_indices (array): Bending edge indices, shape [edge_count*4], int
edge_rest_angle (array): Bending edge rest angle, shape [edge_count], float
edge_bending_properties (array): Bending edge stiffness and damping parameters, shape [edge_count, 2], float
tet_indices (array): Tetrahedral element indices, shape [tet_count*4], int
tet_poses (array): Tetrahedral rest poses, shape [tet_count, 3, 3], float
tet_activations (array): Tetrahedral volumetric activations, shape [tet_count], float
tet_materials (array): Tetrahedral elastic parameters in form :math:`k_{mu}, k_{lambda}, k_{damp}`, shape [tet_count, 3]
muscle_start (array): Start index of the first muscle point per muscle, shape [muscle_count], int
muscle_params (array): Muscle parameters, shape [muscle_count, 5], float
muscle_bodies (array): Body indices of the muscle waypoints, int
muscle_points (array): Local body offset of the muscle waypoints, float
muscle_activations (array): Muscle activations, shape [muscle_count], float
body_q (array): Poses of rigid bodies used for state initialization, shape [body_count, 7], float
body_qd (array): Velocities of rigid bodies used for state initialization, shape [body_count, 6], float
        body_com (array): Rigid body center of mass (in local frame), shape [body_count, 3], float
body_inertia (array): Rigid body inertia tensor (relative to COM), shape [body_count, 3, 3], float
body_inv_inertia (array): Rigid body inverse inertia tensor (relative to COM), shape [body_count, 3, 3], float
body_mass (array): Rigid body mass, shape [body_count], float
body_inv_mass (array): Rigid body inverse mass, shape [body_count], float
body_name (list): Rigid body names, shape [body_count], str
joint_q (array): Generalized joint positions used for state initialization, shape [joint_coord_count], float
joint_qd (array): Generalized joint velocities used for state initialization, shape [joint_dof_count], float
joint_act (array): Generalized joint control inputs, shape [joint_axis_count], float
joint_type (array): Joint type, shape [joint_count], int
joint_parent (array): Joint parent body indices, shape [joint_count], int
joint_child (array): Joint child body indices, shape [joint_count], int
joint_X_p (array): Joint transform in parent frame, shape [joint_count, 7], float
joint_X_c (array): Joint mass frame in child frame, shape [joint_count, 7], float
joint_axis (array): Joint axis in child frame, shape [joint_axis_count, 3], float
joint_armature (array): Armature for each joint axis (only used by :class:`FeatherstoneIntegrator`), shape [joint_count], float
joint_target_ke (array): Joint stiffness, shape [joint_axis_count], float
joint_target_kd (array): Joint damping, shape [joint_axis_count], float
joint_axis_start (array): Start index of the first axis per joint, shape [joint_count], int
joint_axis_dim (array): Number of linear and angular axes per joint, shape [joint_count, 2], int
joint_axis_mode (array): Joint axis mode, shape [joint_axis_count], int. See `Joint modes`_.
joint_linear_compliance (array): Joint linear compliance, shape [joint_count], float
        joint_angular_compliance (array): Joint angular compliance, shape [joint_count], float
joint_enabled (array): Controls which joint is simulated (bodies become disconnected if False), shape [joint_count], int
Note:
This setting is not supported by :class:`FeatherstoneIntegrator`.
joint_limit_lower (array): Joint lower position limits, shape [joint_count], float
joint_limit_upper (array): Joint upper position limits, shape [joint_count], float
joint_limit_ke (array): Joint position limit stiffness (used by the Euler integrators), shape [joint_count], float
joint_limit_kd (array): Joint position limit damping (used by the Euler integrators), shape [joint_count], float
joint_twist_lower (array): Joint lower twist limit, shape [joint_count], float
joint_twist_upper (array): Joint upper twist limit, shape [joint_count], float
joint_q_start (array): Start index of the first position coordinate per joint, shape [joint_count], int
joint_qd_start (array): Start index of the first velocity coordinate per joint, shape [joint_count], int
articulation_start (array): Articulation start index, shape [articulation_count], int
joint_name (list): Joint names, shape [joint_count], str
joint_attach_ke (float): Joint attachment force stiffness (used by :class:`SemiImplicitIntegrator`)
joint_attach_kd (float): Joint attachment force damping (used by :class:`SemiImplicitIntegrator`)
soft_contact_margin (float): Contact margin for generation of soft contacts
soft_contact_ke (float): Stiffness of soft contacts (used by the Euler integrators)
soft_contact_kd (float): Damping of soft contacts (used by the Euler integrators)
soft_contact_kf (float): Stiffness of friction force in soft contacts (used by the Euler integrators)
soft_contact_mu (float): Friction coefficient of soft contacts
soft_contact_restitution (float): Restitution coefficient of soft contacts (used by :class:`XPBDIntegrator`)
soft_contact_count (array): Number of active particle-shape contacts, shape [1], int
        soft_contact_particle (array): Index of particle per soft contact point, shape [soft_contact_max], int
        soft_contact_shape (array): Index of shape per soft contact point, shape [soft_contact_max], int
        soft_contact_body_pos (array): Positional offset of soft contact point in body frame, shape [soft_contact_max], vec3
        soft_contact_body_vel (array): Linear velocity of soft contact point in body frame, shape [soft_contact_max], vec3
        soft_contact_normal (array): Contact surface normal of soft contact point in world space, shape [soft_contact_max], vec3
rigid_contact_max (int): Maximum number of potential rigid body contact points to generate ignoring the `rigid_mesh_contact_max` limit.
rigid_contact_max_limited (int): Maximum number of potential rigid body contact points to generate respecting the `rigid_mesh_contact_max` limit.
rigid_mesh_contact_max (int): Maximum number of rigid body contact points to generate per mesh (0 = unlimited, default)
rigid_contact_margin (float): Contact margin for generation of rigid body contacts
rigid_contact_torsional_friction (float): Torsional friction coefficient for rigid body contacts (used by :class:`XPBDIntegrator`)
rigid_contact_rolling_friction (float): Rolling friction coefficient for rigid body contacts (used by :class:`XPBDIntegrator`)
rigid_contact_count (array): Number of active shape-shape contacts, shape [1], int
rigid_contact_point0 (array): Contact point relative to frame of body 0, shape [rigid_contact_max], vec3
rigid_contact_point1 (array): Contact point relative to frame of body 1, shape [rigid_contact_max], vec3
rigid_contact_offset0 (array): Contact offset due to contact thickness relative to body 0, shape [rigid_contact_max], vec3
rigid_contact_offset1 (array): Contact offset due to contact thickness relative to body 1, shape [rigid_contact_max], vec3
rigid_contact_normal (array): Contact normal in world space, shape [rigid_contact_max], vec3
rigid_contact_thickness (array): Total contact thickness, shape [rigid_contact_max], float
rigid_contact_shape0 (array): Index of shape 0 per contact, shape [rigid_contact_max], int
rigid_contact_shape1 (array): Index of shape 1 per contact, shape [rigid_contact_max], int
ground (bool): Whether the ground plane and ground contacts are enabled
ground_plane (array): Ground plane 3D normal and offset, shape [4], float
up_vector (np.ndarray): Up vector of the world, shape [3], float
up_axis (int): Up axis, 0 for x, 1 for y, 2 for z
gravity (np.ndarray): Gravity vector, shape [3], float
particle_count (int): Total number of particles in the system
body_count (int): Total number of bodies in the system
shape_count (int): Total number of shapes in the system
joint_count (int): Total number of joints in the system
tri_count (int): Total number of triangles in the system
tet_count (int): Total number of tetrahedra in the system
edge_count (int): Total number of edges in the system
spring_count (int): Total number of springs in the system
contact_count (int): Total number of contacts in the system
muscle_count (int): Total number of muscles in the system
articulation_count (int): Total number of articulations in the system
joint_dof_count (int): Total number of velocity degrees of freedom of all joints in the system
joint_coord_count (int): Total number of position degrees of freedom of all joints in the system
device (wp.Device): Device on which the Model was allocated
Note:
        It is strongly recommended to use the ModelBuilder to construct a simulation rather
        than creating your own Model object directly; however, it is possible to do so if
        desired.
"""
def __init__(self, device=None):
self.requires_grad = False
self.num_envs = 0
self.particle_q = None
self.particle_qd = None
self.particle_mass = None
self.particle_inv_mass = None
self.particle_radius = None
self.particle_max_radius = 0.0
self.particle_ke = 1.0e3
self.particle_kd = 1.0e2
self.particle_kf = 1.0e2
self.particle_mu = 0.5
self.particle_cohesion = 0.0
self.particle_adhesion = 0.0
self.particle_grid = None
self.particle_flags = None
self.particle_max_velocity = 1e5
self.shape_transform = None
self.shape_body = None
self.shape_visible = None
self.body_shapes = {}
self.shape_materials = ModelShapeMaterials()
self.shape_geo = ModelShapeGeometry()
self.shape_geo_src = None
self.shape_collision_group = None
self.shape_collision_group_map = None
self.shape_collision_filter_pairs = None
self.shape_collision_radius = None
self.shape_ground_collision = None
self.shape_shape_collision = None
self.shape_contact_pairs = None
self.shape_ground_contact_pairs = None
self.spring_indices = None
self.spring_rest_length = None
self.spring_stiffness = None
self.spring_damping = None
self.spring_control = None
self.spring_constraint_lambdas = None
self.tri_indices = None
self.tri_poses = None
self.tri_activations = None
self.tri_materials = None
self.edge_indices = None
self.edge_rest_angle = None
self.edge_bending_properties = None
self.edge_constraint_lambdas = None
self.tet_indices = None
self.tet_poses = None
self.tet_activations = None
self.tet_materials = None
self.muscle_start = None
self.muscle_params = None
self.muscle_bodies = None
self.muscle_points = None
self.muscle_activations = None
self.body_q = None
self.body_qd = None
self.body_com = None
self.body_inertia = None
self.body_inv_inertia = None
self.body_mass = None
self.body_inv_mass = None
self.body_name = None
self.joint_q = None
self.joint_qd = None
self.joint_act = None
self.joint_type = None
self.joint_parent = None
self.joint_child = None
self.joint_X_p = None
self.joint_X_c = None
self.joint_axis = None
self.joint_armature = None
self.joint_target_ke = None
self.joint_target_kd = None
self.joint_axis_start = None
self.joint_axis_dim = None
self.joint_axis_mode = None
self.joint_linear_compliance = None
self.joint_angular_compliance = None
self.joint_enabled = None
self.joint_limit_lower = None
self.joint_limit_upper = None
self.joint_limit_ke = None
self.joint_limit_kd = None
self.joint_twist_lower = None
self.joint_twist_upper = None
self.joint_q_start = None
self.joint_qd_start = None
self.articulation_start = None
self.joint_name = None
# todo: per-joint values?
self.joint_attach_ke = 1.0e3
self.joint_attach_kd = 1.0e2
self.soft_contact_margin = 0.2
self.soft_contact_ke = 1.0e3
self.soft_contact_kd = 10.0
self.soft_contact_kf = 1.0e3
self.soft_contact_mu = 0.5
self.soft_contact_restitution = 0.0
self.soft_contact_count = 0
self.soft_contact_particle = None
self.soft_contact_shape = None
self.soft_contact_body_pos = None
self.soft_contact_body_vel = None
self.soft_contact_normal = None
self.rigid_contact_max = 0
self.rigid_contact_max_limited = 0
self.rigid_mesh_contact_max = 0
self.rigid_contact_margin = None
self.rigid_contact_torsional_friction = None
self.rigid_contact_rolling_friction = None
self.rigid_contact_count = None
self.rigid_contact_point0 = None
self.rigid_contact_point1 = None
self.rigid_contact_offset0 = None
self.rigid_contact_offset1 = None
self.rigid_contact_normal = None
self.rigid_contact_thickness = None
self.rigid_contact_shape0 = None
self.rigid_contact_shape1 = None
# toggles ground contact for all shapes
self.ground = True
self.ground_plane = None
self.up_vector = np.array((0.0, 1.0, 0.0))
self.up_axis = 1
self.gravity = np.array((0.0, -9.80665, 0.0))
self.particle_count = 0
self.body_count = 0
self.shape_count = 0
self.joint_count = 0
self.joint_axis_count = 0
self.tri_count = 0
self.tet_count = 0
self.edge_count = 0
self.spring_count = 0
self.muscle_count = 0
self.articulation_count = 0
self.joint_dof_count = 0
self.joint_coord_count = 0
self.device = wp.get_device(device)
def state(self, requires_grad=None) -> State:
"""Returns a state object for the model
The returned state will be initialized with the initial configuration given in
the model description.
Args:
requires_grad (bool): Manual overwrite whether the state variables should have `requires_grad` enabled (defaults to `None` to use the model's setting :attr:`requires_grad`)
Returns:
State: The state object
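        Example (illustrative sketch; ``integrator`` is assumed to be e.g. a :class:`SemiImplicitIntegrator`):
        .. code-block:: python
            state_0 = model.state()
            state_1 = model.state()
            integrator.simulate(model, state_0, state_1, dt=1.0 / 60.0)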
"""
s = State()
if requires_grad is None:
requires_grad = self.requires_grad
# particles
if self.particle_count:
s.particle_q = wp.clone(self.particle_q, requires_grad=requires_grad)
s.particle_qd = wp.clone(self.particle_qd, requires_grad=requires_grad)
s.particle_f = wp.zeros_like(self.particle_qd, requires_grad=requires_grad)
# articulations
if self.body_count:
s.body_q = wp.clone(self.body_q, requires_grad=requires_grad)
s.body_qd = wp.clone(self.body_qd, requires_grad=requires_grad)
s.body_f = wp.zeros_like(self.body_qd, requires_grad=requires_grad)
if self.joint_count:
s.joint_q = wp.clone(self.joint_q, requires_grad=requires_grad)
s.joint_qd = wp.clone(self.joint_qd, requires_grad=requires_grad)
return s
def control(self, requires_grad=None, clone_variables=True) -> Control:
"""
Returns a control object for the model.
The returned control object will be initialized with the control inputs given in the model description.
Args:
requires_grad (bool): Manual overwrite whether the control variables should have `requires_grad` enabled (defaults to `None` to use the model's setting :attr:`requires_grad`)
clone_variables (bool): Whether to clone the control inputs or use the original data
Returns:
Control: The control object
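        Example (illustrative sketch; ``integrator``, ``state_0``, ``state_1``, and ``dt`` are assumed to exist):
        .. code-block:: python
            control = model.control()
            # ... fill in control.joint_act, control.muscle_activations, etc. ...
            integrator.simulate(model, state_0, state_1, dt, control=control)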
"""
c = Control(self)
if requires_grad is None:
requires_grad = self.requires_grad
if clone_variables:
if self.joint_count:
c.joint_act = wp.clone(self.joint_act, requires_grad=requires_grad)
if self.tri_count:
c.tri_activations = wp.clone(self.tri_activations, requires_grad=requires_grad)
if self.tet_count:
c.tet_activations = wp.clone(self.tet_activations, requires_grad=requires_grad)
if self.muscle_count:
c.muscle_activations = wp.clone(self.muscle_activations, requires_grad=requires_grad)
else:
c.joint_act = self.joint_act
c.tri_activations = self.tri_activations
c.tet_activations = self.tet_activations
c.muscle_activations = self.muscle_activations
return c
def _allocate_soft_contacts(self, target, count, requires_grad=False):
with wp.ScopedDevice(self.device):
target.soft_contact_count = wp.zeros(1, dtype=wp.int32)
target.soft_contact_particle = wp.zeros(count, dtype=int)
target.soft_contact_shape = wp.zeros(count, dtype=int)
target.soft_contact_body_pos = wp.zeros(count, dtype=wp.vec3, requires_grad=requires_grad)
target.soft_contact_body_vel = wp.zeros(count, dtype=wp.vec3, requires_grad=requires_grad)
target.soft_contact_normal = wp.zeros(count, dtype=wp.vec3, requires_grad=requires_grad)
target.soft_contact_tids = wp.zeros(count, dtype=int)
def allocate_soft_contacts(self, count, requires_grad=False):
self._allocate_soft_contacts(self, count, requires_grad)
def find_shape_contact_pairs(self):
# find potential contact pairs based on collision groups and collision mask (pairwise filtering)
import copy
import itertools
filters = copy.copy(self.shape_collision_filter_pairs)
for a, b in self.shape_collision_filter_pairs:
filters.add((b, a))
contact_pairs = []
# iterate over collision groups (islands)
for group, shapes in self.shape_collision_group_map.items():
for shape_a, shape_b in itertools.product(shapes, shapes):
if not self.shape_shape_collision[shape_a]:
continue
if not self.shape_shape_collision[shape_b]:
continue
if shape_a < shape_b and (shape_a, shape_b) not in filters:
contact_pairs.append((shape_a, shape_b))
if group != -1 and -1 in self.shape_collision_group_map:
# shapes with collision group -1 collide with all other shapes
for shape_a, shape_b in itertools.product(shapes, self.shape_collision_group_map[-1]):
if shape_a < shape_b and (shape_a, shape_b) not in filters:
contact_pairs.append((shape_a, shape_b))
self.shape_contact_pairs = wp.array(np.array(contact_pairs), dtype=wp.int32, device=self.device)
self.shape_contact_pair_count = len(contact_pairs)
# find ground contact pairs
ground_contact_pairs = []
ground_id = self.shape_count - 1
for i in range(ground_id):
if self.shape_ground_collision[i]:
ground_contact_pairs.append((i, ground_id))
self.shape_ground_contact_pairs = wp.array(np.array(ground_contact_pairs), dtype=wp.int32, device=self.device)
self.shape_ground_contact_pair_count = len(ground_contact_pairs)
def count_contact_points(self):
"""
Counts the maximum number of rigid contact points that need to be allocated.
This function returns two values corresponding to the maximum number of potential contacts
excluding the limiting from `Model.rigid_mesh_contact_max` and the maximum number of
contact points that may be generated when considering the `Model.rigid_mesh_contact_max` limit.
:returns:
- potential_count (int): Potential number of contact points
- actual_count (int): Actual number of contact points
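        Example (illustrative sketch, mirroring how contact buffers are typically allocated):
        .. code-block:: python
            potential_count, actual_count = model.count_contact_points()
            model.allocate_rigid_contacts(count=potential_count, limited_contact_count=actual_count)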
"""
from .collide import count_contact_points
# calculate the potential number of shape pair contact points
contact_count = wp.zeros(2, dtype=wp.int32, device=self.device)
wp.launch(
kernel=count_contact_points,
dim=self.shape_contact_pair_count,
inputs=[
self.shape_contact_pairs,
self.shape_geo,
self.rigid_mesh_contact_max,
],
outputs=[contact_count],
device=self.device,
record_tape=False,
)
# count ground contacts
wp.launch(
kernel=count_contact_points,
dim=self.shape_ground_contact_pair_count,
inputs=[
self.shape_ground_contact_pairs,
self.shape_geo,
self.rigid_mesh_contact_max,
],
outputs=[contact_count],
device=self.device,
record_tape=False,
)
counts = contact_count.numpy()
potential_count = int(counts[0])
actual_count = int(counts[1])
return potential_count, actual_count
def allocate_rigid_contacts(self, target=None, count=None, limited_contact_count=None, requires_grad=False):
if count is not None:
# potential number of contact points to consider
self.rigid_contact_max = count
if limited_contact_count is not None:
self.rigid_contact_max_limited = limited_contact_count
if target is None:
target = self
with wp.ScopedDevice(self.device):
# serves as counter of the number of active contact points
target.rigid_contact_count = wp.zeros(1, dtype=wp.int32)
# contact point ID within the (shape_a, shape_b) contact pair
target.rigid_contact_point_id = wp.zeros(self.rigid_contact_max, dtype=wp.int32)
# position of contact point in body 0's frame before the integration step
target.rigid_contact_point0 = wp.zeros(
self.rigid_contact_max_limited, dtype=wp.vec3, requires_grad=requires_grad
)
# position of contact point in body 1's frame before the integration step
target.rigid_contact_point1 = wp.zeros(
self.rigid_contact_max_limited, dtype=wp.vec3, requires_grad=requires_grad
)
# moment arm before the integration step resulting from thickness displacement added to contact point 0 in body 0's frame (used in XPBD contact friction handling)
target.rigid_contact_offset0 = wp.zeros(
self.rigid_contact_max_limited, dtype=wp.vec3, requires_grad=requires_grad
)
# moment arm before the integration step resulting from thickness displacement added to contact point 1 in body 1's frame (used in XPBD contact friction handling)
target.rigid_contact_offset1 = wp.zeros(
self.rigid_contact_max_limited, dtype=wp.vec3, requires_grad=requires_grad
)
# contact normal in world frame
target.rigid_contact_normal = wp.zeros(
self.rigid_contact_max_limited, dtype=wp.vec3, requires_grad=requires_grad
)
# combined thickness of both shapes
target.rigid_contact_thickness = wp.zeros(
self.rigid_contact_max_limited, dtype=wp.float32, requires_grad=requires_grad
)
# ID of the first shape in the contact pair
target.rigid_contact_shape0 = wp.zeros(self.rigid_contact_max_limited, dtype=wp.int32)
# ID of the second shape in the contact pair
target.rigid_contact_shape1 = wp.zeros(self.rigid_contact_max_limited, dtype=wp.int32)
# shape IDs of potential contact pairs found during broadphase
target.rigid_contact_broad_shape0 = wp.zeros(self.rigid_contact_max, dtype=wp.int32)
target.rigid_contact_broad_shape1 = wp.zeros(self.rigid_contact_max, dtype=wp.int32)
max_pair_count = self.shape_count * self.shape_count
# maximum number of contact points per contact pair
target.rigid_contact_point_limit = wp.zeros(max_pair_count, dtype=wp.int32)
# currently found contacts per contact pair
target.rigid_contact_pairwise_counter = wp.zeros(max_pair_count, dtype=wp.int32)
# ID of thread that found the current contact point
target.rigid_contact_tids = wp.zeros(self.rigid_contact_max, dtype=wp.int32)
@property
def soft_contact_max(self):
"""Maximum number of soft contacts that can be registered"""
return len(self.soft_contact_particle)
class ModelBuilder:
"""A helper class for building simulation models at runtime.
Use the ModelBuilder to construct a simulation scene. The ModelBuilder
builds the scene representation using standard Python data structures (lists),
which means it is not differentiable. Once :func:`finalize()`
has been called the ModelBuilder transfers all data to Warp tensors and returns
an object that may be used for simulation.
Example
-------
.. code-block:: python
import warp as wp
import warp.sim
builder = wp.sim.ModelBuilder()
# anchor point (zero mass)
builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
# build chain
for i in range(1, 10):
builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
builder.add_spring(i - 1, i, 1.0e3, 0.0, 0)
# create model
model = builder.finalize("cuda")
state = model.state()
control = model.control() # optional, to support time-varying control inputs
integrator = wp.sim.SemiImplicitIntegrator()
for i in range(100):
state.clear_forces()
integrator.simulate(model, state, state, dt=1.0 / 60.0, control=control)
Note:
It is strongly recommended to use the ModelBuilder to construct a simulation rather
than creating your own Model object directly; however, it is possible to do so if
desired.
"""
# Default particle settings
default_particle_radius = 0.1
# Default triangle soft mesh settings
default_tri_ke = 100.0
default_tri_ka = 100.0
default_tri_kd = 10.0
default_tri_drag = 0.0
default_tri_lift = 0.0
# Default distance constraint properties
default_spring_ke = 100.0
default_spring_kd = 0.0
# Default edge bending properties
default_edge_ke = 100.0
default_edge_kd = 0.0
# Default rigid shape contact material properties
default_shape_ke = 1.0e5
default_shape_kd = 1000.0
default_shape_kf = 1000.0
default_shape_ka = 0.0
default_shape_mu = 0.5
default_shape_restitution = 0.0
default_shape_density = 1000.0
default_shape_thickness = 1e-5
# Default joint settings
default_joint_limit_ke = 100.0
default_joint_limit_kd = 1.0
def __init__(self, up_vector=(0.0, 1.0, 0.0), gravity=-9.80665):
self.num_envs = 0
# particles
self.particle_q = []
self.particle_qd = []
self.particle_mass = []
self.particle_radius = []
self.particle_flags = []
self.particle_max_velocity = 1e5
# shapes (each shape has an entry in these arrays)
# transform from shape to body
self.shape_transform = []
# maps from shape index to body index
self.shape_body = []
self.shape_visible = []
self.shape_geo_type = []
self.shape_geo_scale = []
self.shape_geo_src = []
self.shape_geo_is_solid = []
self.shape_geo_thickness = []
self.shape_material_ke = []
self.shape_material_kd = []
self.shape_material_kf = []
self.shape_material_ka = []
self.shape_material_mu = []
self.shape_material_restitution = []
# collision groups within which collisions are handled
self.shape_collision_group = []
self.shape_collision_group_map = {}
self.last_collision_group = 0
# radius to use for broadphase collision checking
self.shape_collision_radius = []
# whether the shape collides with the ground
self.shape_ground_collision = []
# whether the shape collides with any other shape
self.shape_shape_collision = []
# filtering to ignore certain collision pairs
self.shape_collision_filter_pairs = set()
# geometry
self.geo_meshes = []
self.geo_sdfs = []
# springs
self.spring_indices = []
self.spring_rest_length = []
self.spring_stiffness = []
self.spring_damping = []
self.spring_control = []
# triangles
self.tri_indices = []
self.tri_poses = []
self.tri_activations = []
self.tri_materials = []
# edges (bending)
self.edge_indices = []
self.edge_rest_angle = []
self.edge_bending_properties = []
# tetrahedra
self.tet_indices = []
self.tet_poses = []
self.tet_activations = []
self.tet_materials = []
# muscles
self.muscle_start = []
self.muscle_params = []
self.muscle_activations = []
self.muscle_bodies = []
self.muscle_points = []
# rigid bodies
self.body_mass = []
self.body_inertia = []
self.body_inv_mass = []
self.body_inv_inertia = []
self.body_com = []
self.body_q = []
self.body_qd = []
self.body_name = []
self.body_shapes = {} # mapping from body to shapes
# rigid joints
self.joint = {}
self.joint_parent = [] # index of the parent body (constant)
self.joint_parents = {} # mapping from joint to parent bodies
self.joint_child = [] # index of the child body (constant)
self.joint_axis = [] # joint axis in child joint frame (constant)
self.joint_X_p = [] # frame of joint in parent (constant)
self.joint_X_c = [] # frame of child com (in child coordinates) (constant)
self.joint_q = []
self.joint_qd = []
self.joint_type = []
self.joint_name = []
self.joint_armature = []
self.joint_target_ke = []
self.joint_target_kd = []
self.joint_axis_mode = []
self.joint_limit_lower = []
self.joint_limit_upper = []
self.joint_limit_ke = []
self.joint_limit_kd = []
self.joint_act = []
self.joint_twist_lower = []
self.joint_twist_upper = []
self.joint_linear_compliance = []
self.joint_angular_compliance = []
self.joint_enabled = []
self.joint_q_start = []
self.joint_qd_start = []
self.joint_axis_start = []
self.joint_axis_dim = []
self.articulation_start = []
self.joint_dof_count = 0
self.joint_coord_count = 0
self.joint_axis_total_count = 0
self.up_vector = wp.vec3(up_vector)
self.up_axis = wp.vec3(np.argmax(np.abs(up_vector)))
self.gravity = gravity
# indicates whether a ground plane has been created
self._ground_created = False
# constructor parameters for ground plane shape
self._ground_params = {
"plane": (*up_vector, 0.0),
"width": 0.0,
"length": 0.0,
"ke": self.default_shape_ke,
"kd": self.default_shape_kd,
"kf": self.default_shape_kf,
"mu": self.default_shape_mu,
"restitution": self.default_shape_restitution,
}
# Maximum number of soft contacts that can be registered
self.soft_contact_max = 64 * 1024
# maximum number of contact points to generate per mesh shape
self.rigid_mesh_contact_max = 0 # 0 = unlimited
# distance margin within which contacts are generated at
# every simulation substep (the margin can be 0 if only one PBD solver iteration is used)
self.rigid_contact_margin = 0.1
# torsional friction coefficient (only considered by XPBD so far)
self.rigid_contact_torsional_friction = 0.5
# rolling friction coefficient (only considered by XPBD so far)
self.rigid_contact_rolling_friction = 0.001
# number of rigid contact points to allocate in the model during self.finalize() per environment
# if this setting is None, the worst-case number of contacts will be calculated in self.finalize()
self.num_rigid_contacts_per_env = None
@property
def shape_count(self):
return len(self.shape_geo_type)
@property
def body_count(self):
return len(self.body_q)
@property
def joint_count(self):
return len(self.joint_type)
@property
def joint_axis_count(self):
return len(self.joint_axis)
@property
def particle_count(self):
return len(self.particle_q)
@property
def tri_count(self):
return len(self.tri_poses)
@property
def tet_count(self):
return len(self.tet_poses)
@property
def edge_count(self):
return len(self.edge_rest_angle)
@property
def spring_count(self):
return len(self.spring_rest_length)
@property
def muscle_count(self):
return len(self.muscle_start)
@property
def articulation_count(self):
return len(self.articulation_start)
# an articulation is a set of contiguous bodies from articulation_start[i] to articulation_start[i+1]
# these are used for computing forward kinematics e.g.:
#
# model.eval_articulation_fk()
# model.eval_articulation_j()
# model.eval_articulation_m()
#
# articulations are automatically 'closed' when calling finalize
def add_articulation(self):
self.articulation_start.append(self.joint_count)
def add_builder(self, builder, xform=None, update_num_env_count=True, separate_collision_group=True):
"""Copies the data from `builder`, another `ModelBuilder` to this `ModelBuilder`.
Args:
builder (ModelBuilder): a model builder to add model data from.
xform (:ref:`transform <transform>`): offset transform applied to root bodies.
update_num_env_count (bool): if True, the number of environments is incremented by 1.
separate_collision_group (bool): if True, the shapes from the articulations in `builder` will all be put into a single new collision group, otherwise, only the shapes in collision group > -1 will be moved to a new group.
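Example (an illustrative sketch; ``create_env_builder`` is a hypothetical helper that returns a :class:`ModelBuilder` populated with one environment):
.. code-block:: python
main_builder = wp.sim.ModelBuilder()
env_builder = create_env_builder() # hypothetical helper, not part of warp.sim
for i in range(4):
xform = wp.transform(wp.vec3(i * 2.0, 0.0, 0.0), wp.quat_identity())
main_builder.add_builder(env_builder, xform=xform)
model = main_builder.finalize()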
"""
start_particle_idx = self.particle_count
if builder.particle_count:
self.particle_max_velocity = builder.particle_max_velocity
if xform is not None:
pos_offset = wp.transform_get_translation(xform)
else:
pos_offset = np.zeros(3)
self.particle_q.extend((np.array(builder.particle_q) + pos_offset).tolist())
# other particle attributes are added below
if builder.spring_count:
self.spring_indices.extend((np.array(builder.spring_indices, dtype=np.int32) + start_particle_idx).tolist())
if builder.edge_count:
self.edge_indices.extend((np.array(builder.edge_indices, dtype=np.int32) + start_particle_idx).tolist())
if builder.tri_count:
self.tri_indices.extend((np.array(builder.tri_indices, dtype=np.int32) + start_particle_idx).tolist())
if builder.tet_count:
self.tet_indices.extend((np.array(builder.tet_indices, dtype=np.int32) + start_particle_idx).tolist())
start_body_idx = self.body_count
start_shape_idx = self.shape_count
for s, b in enumerate(builder.shape_body):
if b > -1:
new_b = b + start_body_idx
self.shape_body.append(new_b)
self.shape_transform.append(builder.shape_transform[s])
else:
self.shape_body.append(-1)
# apply offset transform to root bodies
if xform is not None:
self.shape_transform.append(xform * builder.shape_transform[s])
for b, shapes in builder.body_shapes.items():
self.body_shapes[b + start_body_idx] = [s + start_shape_idx for s in shapes]
if builder.joint_count:
joint_X_p = copy.deepcopy(builder.joint_X_p)
joint_q = copy.deepcopy(builder.joint_q)
if xform is not None:
for i in range(len(joint_X_p)):
if builder.joint_type[i] == wp.sim.JOINT_FREE:
qi = builder.joint_q_start[i]
xform_prev = wp.transform(joint_q[qi : qi + 3], joint_q[qi + 3 : qi + 7])
tf = xform * xform_prev
joint_q[qi : qi + 3] = tf.p
joint_q[qi + 3 : qi + 7] = tf.q
elif builder.joint_parent[i] == -1:
joint_X_p[i] = xform * joint_X_p[i]
self.joint_X_p.extend(joint_X_p)
self.joint_q.extend(joint_q)
self.add_articulation()
# offset the indices
self.joint_parent.extend([p + self.joint_count if p != -1 else -1 for p in builder.joint_parent])
self.joint_child.extend([c + self.joint_count for c in builder.joint_child])
self.joint_q_start.extend([c + self.joint_coord_count for c in builder.joint_q_start])
self.joint_qd_start.extend([c + self.joint_dof_count for c in builder.joint_qd_start])
self.joint_axis_start.extend([a + self.joint_axis_total_count for a in builder.joint_axis_start])
joint_children = set(builder.joint_child)
for i in range(builder.body_count):
if xform is not None and i not in joint_children:
# rigid body is not attached to a joint, so apply input transform directly
self.body_q.append(xform * builder.body_q[i])
else:
self.body_q.append(builder.body_q[i])
# apply collision group
if separate_collision_group:
self.shape_collision_group.extend([self.last_collision_group + 1 for _ in builder.shape_collision_group])
else:
self.shape_collision_group.extend(
[(g + self.last_collision_group if g > -1 else -1) for g in builder.shape_collision_group]
)
shape_count = self.shape_count
for i, j in builder.shape_collision_filter_pairs:
self.shape_collision_filter_pairs.add((i + shape_count, j + shape_count))
for group, shapes in builder.shape_collision_group_map.items():
if separate_collision_group:
group = self.last_collision_group + 1
else:
group = group + self.last_collision_group if group > -1 else -1
if group not in self.shape_collision_group_map:
self.shape_collision_group_map[group] = []
self.shape_collision_group_map[group].extend([s + shape_count for s in shapes])
# update last collision group counter
if separate_collision_group:
self.last_collision_group += 1
elif builder.last_collision_group > -1:
self.last_collision_group += builder.last_collision_group
more_builder_attrs = [
"body_inertia",
"body_mass",
"body_inv_inertia",
"body_inv_mass",
"body_com",
"body_qd",
"body_name",
"joint_type",
"joint_enabled",
"joint_X_c",
"joint_armature",
"joint_axis",
"joint_axis_dim",
"joint_axis_mode",
"joint_name",
"joint_qd",
"joint_act",
"joint_limit_lower",
"joint_limit_upper",
"joint_limit_ke",
"joint_limit_kd",
"joint_target_ke",
"joint_target_kd",
"joint_linear_compliance",
"joint_angular_compliance",
"shape_visible",
"shape_geo_type",
"shape_geo_scale",
"shape_geo_src",
"shape_geo_is_solid",
"shape_geo_thickness",
"shape_material_ke",
"shape_material_kd",
"shape_material_kf",
"shape_material_ka",
"shape_material_mu",
"shape_material_restitution",
"shape_collision_radius",
"shape_ground_collision",
"shape_shape_collision",
"particle_qd",
"particle_mass",
"particle_radius",
"particle_flags",
"edge_rest_angle",
"edge_bending_properties",
"spring_rest_length",
"spring_stiffness",
"spring_damping",
"spring_control",
"tri_poses",
"tri_activations",
"tri_materials",
"tet_poses",
"tet_activations",
"tet_materials",
]
for attr in more_builder_attrs:
getattr(self, attr).extend(getattr(builder, attr))
self.joint_dof_count += builder.joint_dof_count
self.joint_coord_count += builder.joint_coord_count
self.joint_axis_total_count += builder.joint_axis_total_count
self.up_vector = builder.up_vector
self.gravity = builder.gravity
self._ground_params = builder._ground_params
if update_num_env_count:
self.num_envs += 1
# register a rigid body and return its index.
def add_body(
self,
origin: Optional[Transform] = None,
armature: float = 0.0,
com: Optional[Vec3] = None,
I_m: Optional[Mat33] = None,
m: float = 0.0,
name: str = None,
) -> int:
"""Adds a rigid body to the model.
Args:
origin: The location of the body in the world frame
armature: Artificial inertia added to the body
com: The center of mass of the body w.r.t its origin
I_m: The 3x3 inertia tensor of the body (specified relative to the center of mass)
m: Mass of the body
name: Name of the body (optional)
Returns:
The index of the body in the model
Note:
If the mass (m) is zero then the body is treated as kinematic with no dynamics
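Example (an illustrative sketch; ``builder`` is assumed to be a :class:`ModelBuilder`):
.. code-block:: python
body = builder.add_body(origin=wp.transform((0.0, 1.0, 0.0), wp.quat_identity()), name="box_body")
# attach a shape so that mass and inertia are computed from its density
builder.add_shape_box(body, hx=0.25, hy=0.25, hz=0.25, density=100.0)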
"""
if origin is None:
origin = wp.transform()
if com is None:
com = wp.vec3()
if I_m is None:
I_m = wp.mat33()
body_id = len(self.body_mass)
# body data
inertia = I_m + wp.mat33(np.eye(3)) * armature
self.body_inertia.append(inertia)
self.body_mass.append(m)
self.body_com.append(com)
if m > 0.0:
self.body_inv_mass.append(1.0 / m)
else:
self.body_inv_mass.append(0.0)
if any(x for x in inertia):
self.body_inv_inertia.append(wp.inverse(inertia))
else:
self.body_inv_inertia.append(inertia)
self.body_q.append(origin)
self.body_qd.append(wp.spatial_vector())
self.body_name.append(name or f"body {body_id}")
self.body_shapes[body_id] = []
return body_id
def add_joint(
self,
joint_type: wp.constant,
parent: int,
child: int,
linear_axes: Optional[List[JointAxis]] = None,
angular_axes: Optional[List[JointAxis]] = None,
name: str = None,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
linear_compliance: float = 0.0,
angular_compliance: float = 0.0,
armature: float = 1e-2,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""
Generic method to add any type of joint to this ModelBuilder.
Args:
joint_type (constant): The type of joint to add (see `Joint types`_)
parent (int): The index of the parent body (-1 is the world)
child (int): The index of the child body
linear_axes (list(:class:`JointAxis`)): The linear axes (see :class:`JointAxis`) of the joint
angular_axes (list(:class:`JointAxis`)): The angular axes (see :class:`JointAxis`) of the joint
name (str): The name of the joint (optional)
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
linear_compliance (float): The linear compliance of the joint
angular_compliance (float): The angular compliance of the joint
armature (float): Artificial inertia added around the joint axes (only considered by :class:`FeatherstoneIntegrator`)
collision_filter_parent (bool): Whether to filter collisions between shapes of the parent and child bodies
enabled (bool): Whether the joint is enabled (not considered by :class:`FeatherstoneIntegrator`)
Returns:
The index of the added joint
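Example (an illustrative sketch; ``builder`` and the body index ``child`` are assumed to come from earlier calls):
.. code-block:: python
# hinge the child body to the world about the z-axis
axis = wp.sim.JointAxis((0.0, 0.0, 1.0))
builder.add_joint(wp.sim.JOINT_REVOLUTE, parent=-1, child=child, angular_axes=[axis])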
"""
if linear_axes is None:
linear_axes = []
if angular_axes is None:
angular_axes = []
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
if len(self.articulation_start) == 0:
# automatically add an articulation if none exists
self.add_articulation()
self.joint_type.append(joint_type)
self.joint_parent.append(parent)
if child not in self.joint_parents:
self.joint_parents[child] = [parent]
else:
self.joint_parents[child].append(parent)
self.joint_child.append(child)
self.joint_X_p.append(wp.transform(parent_xform))
self.joint_X_c.append(wp.transform(child_xform))
self.joint_name.append(name or f"joint {self.joint_count}")
self.joint_axis_start.append(len(self.joint_axis))
self.joint_axis_dim.append((len(linear_axes), len(angular_axes)))
self.joint_axis_total_count += len(linear_axes) + len(angular_axes)
self.joint_linear_compliance.append(linear_compliance)
self.joint_angular_compliance.append(angular_compliance)
self.joint_enabled.append(enabled)
def add_axis_dim(dim: JointAxis):
self.joint_axis.append(dim.axis)
self.joint_axis_mode.append(dim.mode)
self.joint_act.append(dim.action)
self.joint_target_ke.append(dim.target_ke)
self.joint_target_kd.append(dim.target_kd)
self.joint_limit_ke.append(dim.limit_ke)
self.joint_limit_kd.append(dim.limit_kd)
if np.isfinite(dim.limit_lower):
self.joint_limit_lower.append(dim.limit_lower)
else:
self.joint_limit_lower.append(-1e6)
if np.isfinite(dim.limit_upper):
self.joint_limit_upper.append(dim.limit_upper)
else:
self.joint_limit_upper.append(1e6)
for dim in linear_axes:
add_axis_dim(dim)
for dim in angular_axes:
add_axis_dim(dim)
if joint_type == JOINT_PRISMATIC:
dof_count = 1
coord_count = 1
elif joint_type == JOINT_REVOLUTE:
dof_count = 1
coord_count = 1
elif joint_type == JOINT_BALL:
dof_count = 3
coord_count = 4
elif joint_type == JOINT_FREE or joint_type == JOINT_DISTANCE:
dof_count = 6
coord_count = 7
elif joint_type == JOINT_FIXED:
dof_count = 0
coord_count = 0
elif joint_type == JOINT_UNIVERSAL:
dof_count = 2
coord_count = 2
elif joint_type == JOINT_COMPOUND:
dof_count = 3
coord_count = 3
elif joint_type == JOINT_D6:
dof_count = len(linear_axes) + len(angular_axes)
coord_count = dof_count
for _i in range(coord_count):
self.joint_q.append(0.0)
for _i in range(dof_count):
self.joint_qd.append(0.0)
self.joint_armature.append(armature)
if joint_type == JOINT_FREE or joint_type == JOINT_DISTANCE or joint_type == JOINT_BALL:
# ensure that a valid quaternion is used for the angular dofs
self.joint_q[-1] = 1.0
self.joint_q_start.append(self.joint_coord_count)
self.joint_qd_start.append(self.joint_dof_count)
self.joint_dof_count += dof_count
self.joint_coord_count += coord_count
if collision_filter_parent and parent > -1:
for child_shape in self.body_shapes[child]:
for parent_shape in self.body_shapes[parent]:
self.shape_collision_filter_pairs.add((parent_shape, child_shape))
return self.joint_count - 1
def add_joint_revolute(
self,
parent: int,
child: int,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
axis: Vec3 = (1.0, 0.0, 0.0),
target: float = None,
target_ke: float = 0.0,
target_kd: float = 0.0,
mode: int = JOINT_MODE_FORCE,
limit_lower: float = -2 * math.pi,
limit_upper: float = 2 * math.pi,
limit_ke: float = default_joint_limit_ke,
limit_kd: float = default_joint_limit_kd,
linear_compliance: float = 0.0,
angular_compliance: float = 0.0,
armature: float = 1e-2,
name: str = None,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""Adds a revolute (hinge) joint to the model. It has one degree of freedom.
Args:
parent: The index of the parent body
child: The index of the child body
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
axis (3D vector or JointAxis): The axis of rotation in the parent body's local frame, can be a JointAxis object whose settings will be used instead of the other arguments
target: The target angle (in radians) or target velocity of the joint (if None, the joint is considered to be in force control mode)
target_ke: The stiffness of the joint target
target_kd: The damping of the joint target
limit_lower: The lower limit of the joint
limit_upper: The upper limit of the joint
limit_ke: The stiffness of the joint limit
limit_kd: The damping of the joint limit
linear_compliance: The linear compliance of the joint
angular_compliance: The angular compliance of the joint
armature: Artificial inertia added around the joint axis
name: The name of the joint
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
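Example (an illustrative sketch; ``parent`` and ``child`` are assumed to be body indices returned by :meth:`add_body`):
.. code-block:: python
joint = builder.add_joint_revolute(
parent=parent,
child=child,
axis=(0.0, 0.0, 1.0),
limit_lower=-0.5 * math.pi,
limit_upper=0.5 * math.pi,
)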
"""
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
action = 0.0
if target is None and mode == JOINT_MODE_TARGET_POSITION:
action = 0.5 * (limit_lower + limit_upper)
elif target is not None:
action = target
if mode == JOINT_MODE_FORCE:
mode = JOINT_MODE_TARGET_POSITION
ax = JointAxis(
axis=axis,
limit_lower=limit_lower,
limit_upper=limit_upper,
action=action,
target_ke=target_ke,
target_kd=target_kd,
mode=mode,
limit_ke=limit_ke,
limit_kd=limit_kd,
)
return self.add_joint(
JOINT_REVOLUTE,
parent,
child,
parent_xform=parent_xform,
child_xform=child_xform,
angular_axes=[ax],
linear_compliance=linear_compliance,
angular_compliance=angular_compliance,
armature=armature,
name=name,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def add_joint_prismatic(
self,
parent: int,
child: int,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
axis: Vec3 = (1.0, 0.0, 0.0),
target: float = None,
target_ke: float = 0.0,
target_kd: float = 0.0,
mode: int = JOINT_MODE_FORCE,
limit_lower: float = -1e4,
limit_upper: float = 1e4,
limit_ke: float = default_joint_limit_ke,
limit_kd: float = default_joint_limit_kd,
linear_compliance: float = 0.0,
angular_compliance: float = 0.0,
armature: float = 1e-2,
name: str = None,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""Adds a prismatic (sliding) joint to the model. It has one degree of freedom.
Args:
parent: The index of the parent body
child: The index of the child body
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
axis (3D vector or JointAxis): The axis of rotation in the parent body's local frame, can be a JointAxis object whose settings will be used instead of the other arguments
target: The target position or velocity of the joint (if None, the joint is considered to be in force control mode)
target_ke: The stiffness of the joint target
target_kd: The damping of the joint target
limit_lower: The lower limit of the joint
limit_upper: The upper limit of the joint
limit_ke: The stiffness of the joint limit
limit_kd: The damping of the joint limit
linear_compliance: The linear compliance of the joint
angular_compliance: The angular compliance of the joint
armature: Artificial inertia added around the joint axis
name: The name of the joint
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
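Example (an illustrative sketch; ``parent`` and ``child`` are assumed to be body indices returned by :meth:`add_body`):
.. code-block:: python
joint = builder.add_joint_prismatic(
parent=parent,
child=child,
axis=(1.0, 0.0, 0.0),
limit_lower=-0.5,
limit_upper=0.5,
)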
"""
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
action = 0.0
if target is None and mode == JOINT_MODE_TARGET_POSITION:
action = 0.5 * (limit_lower + limit_upper)
elif target is not None:
action = target
if mode == JOINT_MODE_FORCE:
mode = JOINT_MODE_TARGET_POSITION
ax = JointAxis(
axis=axis,
limit_lower=limit_lower,
limit_upper=limit_upper,
action=action,
target_ke=target_ke,
target_kd=target_kd,
mode=mode,
limit_ke=limit_ke,
limit_kd=limit_kd,
)
return self.add_joint(
JOINT_PRISMATIC,
parent,
child,
parent_xform=parent_xform,
child_xform=child_xform,
linear_axes=[ax],
linear_compliance=linear_compliance,
angular_compliance=angular_compliance,
armature=armature,
name=name,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def add_joint_ball(
self,
parent: int,
child: int,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
linear_compliance: float = 0.0,
angular_compliance: float = 0.0,
armature: float = 1e-2,
name: str = None,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""Adds a ball (spherical) joint to the model. Its position is defined by a 4D quaternion (xyzw) and its velocity is a 3D vector.
Args:
parent: The index of the parent body
child: The index of the child body
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
linear_compliance: The linear compliance of the joint
angular_compliance: The angular compliance of the joint
armature (float): Artificial inertia added around the joint axis (only considered by FeatherstoneIntegrator)
name: The name of the joint
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
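Example (an illustrative sketch; ``parent`` and ``child`` are assumed to be body indices returned by :meth:`add_body`):
.. code-block:: python
joint = builder.add_joint_ball(parent, child, child_xform=wp.transform((0.0, 0.5, 0.0), wp.quat_identity()))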
"""
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
return self.add_joint(
JOINT_BALL,
parent,
child,
parent_xform=parent_xform,
child_xform=child_xform,
linear_compliance=linear_compliance,
angular_compliance=angular_compliance,
armature=armature,
name=name,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def add_joint_fixed(
self,
parent: int,
child: int,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
linear_compliance: float = 0.0,
angular_compliance: float = 0.0,
armature: float = 1e-2,
name: str = None,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""Adds a fixed (static) joint to the model. It has no degrees of freedom.
See :meth:`collapse_fixed_joints` for a helper function that removes these fixed joints and merges the connecting bodies to simplify the model and improve stability.
Args:
parent: The index of the parent body
child: The index of the child body
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
linear_compliance: The linear compliance of the joint
angular_compliance: The angular compliance of the joint
armature (float): Artificial inertia added around the joint axis (only considered by FeatherstoneIntegrator)
name: The name of the joint
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
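Example (an illustrative sketch; ``parent`` and ``child`` are assumed to be body indices returned by :meth:`add_body`):
.. code-block:: python
# rigidly welds the child body to the parent body
joint = builder.add_joint_fixed(parent, child)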
"""
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
return self.add_joint(
JOINT_FIXED,
parent,
child,
parent_xform=parent_xform,
child_xform=child_xform,
linear_compliance=linear_compliance,
angular_compliance=angular_compliance,
armature=armature,
name=name,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def add_joint_free(
self,
child: int,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
armature: float = 0.0,
parent: int = -1,
name: str = None,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""Adds a free joint to the model.
It has 7 positional degrees of freedom (first 3 linear and then 4 angular dimensions for the orientation quaternion in `xyzw` notation) and 6 velocity degrees of freedom (first 3 angular and then 3 linear velocity dimensions).
Args:
child: The index of the child body
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
armature (float): Artificial inertia added around the joint axis (only considered by FeatherstoneIntegrator)
parent: The index of the parent body (-1 by default to use the world frame, e.g. to make the child body and its children a floating-base mechanism)
name: The name of the joint
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
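Example (an illustrative sketch; ``body`` is assumed to be a body index returned by :meth:`add_body`):
.. code-block:: python
# make the body a floating base relative to the world
joint = builder.add_joint_free(body)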
"""
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
return self.add_joint(
JOINT_FREE,
parent,
child,
parent_xform=parent_xform,
child_xform=child_xform,
armature=armature,
name=name,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def add_joint_distance(
self,
parent: int,
child: int,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
min_distance: float = -1.0,
max_distance: float = 1.0,
compliance: float = 0.0,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""Adds a distance joint to the model. The distance joint constraints the distance between the joint anchor points on the two bodies (see :ref:`FK-IK`) it connects to the interval [`min_distance`, `max_distance`].
It has 7 positional degrees of freedom (first 3 linear and then 4 angular dimensions for the orientation quaternion in `xyzw` notation) and 6 velocity degrees of freedom (first 3 angular and then 3 linear velocity dimensions).
Args:
parent: The index of the parent body
child: The index of the child body
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
min_distance: The minimum distance between the bodies (no limit if negative)
max_distance: The maximum distance between the bodies (no limit if negative)
compliance: The compliance of the joint
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
.. note:: Distance joints are currently only supported by the :class:`XPBDIntegrator`.
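Example (an illustrative sketch; ``parent`` and ``child`` are assumed to be body indices returned by :meth:`add_body`):
.. code-block:: python
joint = builder.add_joint_distance(parent, child, min_distance=0.1, max_distance=1.0)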
"""
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
ax = JointAxis(
axis=(1.0, 0.0, 0.0),
limit_lower=min_distance,
limit_upper=max_distance,
)
return self.add_joint(
JOINT_DISTANCE,
parent,
child,
parent_xform=parent_xform,
child_xform=child_xform,
linear_axes=[ax],
linear_compliance=compliance,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def add_joint_universal(
self,
parent: int,
child: int,
axis_0: JointAxis,
axis_1: JointAxis,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
linear_compliance: float = 0.0,
angular_compliance: float = 0.0,
armature: float = 1e-2,
name: str = None,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""Adds a universal joint to the model. U-joints have two degrees of freedom, one for each axis.
Args:
parent: The index of the parent body
child: The index of the child body
axis_0 (3D vector or JointAxis): The first axis of the joint, can be a JointAxis object whose settings will be used instead of the other arguments
axis_1 (3D vector or JointAxis): The second axis of the joint, can be a JointAxis object whose settings will be used instead of the other arguments
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
linear_compliance: The linear compliance of the joint
angular_compliance: The angular compliance of the joint
armature: Artificial inertia added around the joint axes
name: The name of the joint
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
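Example (an illustrative sketch; ``parent`` and ``child`` are assumed to be body indices returned by :meth:`add_body`):
.. code-block:: python
joint = builder.add_joint_universal(
parent,
child,
axis_0=wp.sim.JointAxis((1.0, 0.0, 0.0)),
axis_1=wp.sim.JointAxis((0.0, 1.0, 0.0)),
)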
"""
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
return self.add_joint(
JOINT_UNIVERSAL,
parent,
child,
angular_axes=[JointAxis(axis_0), JointAxis(axis_1)],
parent_xform=parent_xform,
child_xform=child_xform,
linear_compliance=linear_compliance,
angular_compliance=angular_compliance,
armature=armature,
name=name,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def add_joint_compound(
self,
parent: int,
child: int,
axis_0: JointAxis,
axis_1: JointAxis,
axis_2: JointAxis,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
linear_compliance: float = 0.0,
angular_compliance: float = 0.0,
armature: float = 1e-2,
name: str = None,
collision_filter_parent: bool = True,
enabled: bool = True,
) -> int:
"""Adds a compound joint to the model, which has 3 degrees of freedom, one for each axis.
Similar to the ball joint (see :meth:`add_joint_ball`), the compound joint allows bodies to move in a 3D rotation relative to each other,
except that the rotation is defined by 3 axes instead of a quaternion.
Depending on the choice of axes, the orientation can be specified through Euler angles, e.g. `z-x-z` or `x-y-x`, or through a Tait-Bryan angle sequence, e.g. `z-y-x` or `x-y-z`.
Args:
parent: The index of the parent body
child: The index of the child body
axis_0 (3D vector or JointAxis): The first axis of the joint, can be a JointAxis object whose settings will be used instead of the other arguments
axis_1 (3D vector or JointAxis): The second axis of the joint, can be a JointAxis object whose settings will be used instead of the other arguments
axis_2 (3D vector or JointAxis): The third axis of the joint, can be a JointAxis object whose settings will be used instead of the other arguments
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
linear_compliance: The linear compliance of the joint
angular_compliance: The angular compliance of the joint
armature: Artificial inertia added around the joint axes
name: The name of the joint
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
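Example (an illustrative sketch; ``parent`` and ``child`` are assumed to be body indices returned by :meth:`add_body`; the axes below form a `z-x-z` Euler sequence):
.. code-block:: python
joint = builder.add_joint_compound(
parent,
child,
axis_0=wp.sim.JointAxis((0.0, 0.0, 1.0)),
axis_1=wp.sim.JointAxis((1.0, 0.0, 0.0)),
axis_2=wp.sim.JointAxis((0.0, 0.0, 1.0)),
)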
"""
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
return self.add_joint(
JOINT_COMPOUND,
parent,
child,
angular_axes=[JointAxis(axis_0), JointAxis(axis_1), JointAxis(axis_2)],
parent_xform=parent_xform,
child_xform=child_xform,
linear_compliance=linear_compliance,
angular_compliance=angular_compliance,
armature=armature,
name=name,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def add_joint_d6(
self,
parent: int,
child: int,
linear_axes: Optional[List[JointAxis]] = None,
angular_axes: Optional[List[JointAxis]] = None,
name: str = None,
parent_xform: Optional[wp.transform] = None,
child_xform: Optional[wp.transform] = None,
linear_compliance: float = 0.0,
angular_compliance: float = 0.0,
armature: float = 1e-2,
collision_filter_parent: bool = True,
enabled: bool = True,
):
"""Adds a generic joint with custom linear and angular axes. The number of axes determines the number of degrees of freedom of the joint.
Args:
parent: The index of the parent body
child: The index of the child body
linear_axes: A list of linear axes
angular_axes: A list of angular axes
name: The name of the joint
parent_xform (:ref:`transform <transform>`): The transform of the joint in the parent body's local frame
child_xform (:ref:`transform <transform>`): The transform of the joint in the child body's local frame
linear_compliance: The linear compliance of the joint
angular_compliance: The angular compliance of the joint
armature: Artificial inertia added around the joint axes
collision_filter_parent: Whether to filter collisions between shapes of the parent and child bodies
enabled: Whether the joint is enabled
Returns:
The index of the added joint
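Example (an illustrative sketch; ``parent`` and ``child`` are assumed to be body indices returned by :meth:`add_body`):
.. code-block:: python
# a planar-style joint: two linear axes plus one angular axis
joint = builder.add_joint_d6(
parent,
child,
linear_axes=[wp.sim.JointAxis((1.0, 0.0, 0.0)), wp.sim.JointAxis((0.0, 0.0, 1.0))],
angular_axes=[wp.sim.JointAxis((0.0, 1.0, 0.0))],
)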
"""
if linear_axes is None:
linear_axes = []
if angular_axes is None:
angular_axes = []
if parent_xform is None:
parent_xform = wp.transform()
if child_xform is None:
child_xform = wp.transform()
return self.add_joint(
JOINT_D6,
parent,
child,
parent_xform=parent_xform,
child_xform=child_xform,
linear_axes=[JointAxis(a) for a in linear_axes],
angular_axes=[JointAxis(a) for a in angular_axes],
linear_compliance=linear_compliance,
angular_compliance=angular_compliance,
armature=armature,
name=name,
collision_filter_parent=collision_filter_parent,
enabled=enabled,
)
def plot_articulation(self, plot_shapes=True):
"""Plots the model's articulation."""
def joint_type_str(type):
if type == JOINT_FREE:
return "free"
elif type == JOINT_BALL:
return "ball"
elif type == JOINT_PRISMATIC:
return "prismatic"
elif type == JOINT_REVOLUTE:
return "revolute"
elif type == JOINT_D6:
return "D6"
elif type == JOINT_UNIVERSAL:
return "universal"
elif type == JOINT_COMPOUND:
return "compound"
elif type == JOINT_FIXED:
return "fixed"
elif type == JOINT_DISTANCE:
return "distance"
return "unknown"
vertices = ["world"] + self.body_name
if plot_shapes:
vertices += [f"shape_{i}" for i in range(self.shape_count)]
edges = []
edge_labels = []
for i in range(self.joint_count):
edges.append((self.joint_child[i] + 1, self.joint_parent[i] + 1))
edge_labels.append(f"{self.joint_name[i]}\n({joint_type_str(self.joint_type[i])})")
if plot_shapes:
for i in range(self.shape_count):
edges.append((len(self.body_name) + i + 1, self.shape_body[i] + 1))
wp.plot_graph(vertices, edges, edge_labels=edge_labels)
def collapse_fixed_joints(self, verbose=wp.config.verbose):
"""Removes fixed joints from the model and merges the bodies they connect. This is useful for simplifying the model for faster and more stable simulation."""
body_data = {}
body_children = {-1: []}
visited = {}
for i in range(self.body_count):
name = self.body_name[i]
body_data[i] = {
"shapes": self.body_shapes[i],
"q": self.body_q[i],
"qd": self.body_qd[i],
"mass": self.body_mass[i],
"inertia": self.body_inertia[i],
"inv_mass": self.body_inv_mass[i],
"inv_inertia": self.body_inv_inertia[i],
"com": self.body_com[i],
"name": name,
"original_id": i,
}
visited[i] = False
body_children[i] = []
joint_data = {}
for i in range(self.joint_count):
name = self.joint_name[i]
parent = self.joint_parent[i]
child = self.joint_child[i]
body_children[parent].append(child)
q_start = self.joint_q_start[i]
qd_start = self.joint_qd_start[i]
if i < self.joint_count - 1:
q_dim = self.joint_q_start[i + 1] - q_start
qd_dim = self.joint_qd_start[i + 1] - qd_start
else:
q_dim = len(self.joint_q) - q_start
qd_dim = len(self.joint_qd) - qd_start
data = {
"type": self.joint_type[i],
"q": self.joint_q[q_start : q_start + q_dim],
"qd": self.joint_qd[qd_start : qd_start + qd_dim],
"act": self.joint_act[qd_start : qd_start + qd_dim],
"armature": self.joint_armature[qd_start : qd_start + qd_dim],
"q_start": q_start,
"qd_start": qd_start,
"linear_compliance": self.joint_linear_compliance[i],
"angular_compliance": self.joint_angular_compliance[i],
"name": name,
"parent_xform": wp.transform_expand(self.joint_X_p[i]),
"child_xform": wp.transform_expand(self.joint_X_c[i]),
"enabled": self.joint_enabled[i],
"axes": [],
"axis_dim": self.joint_axis_dim[i],
"parent": parent,
"child": child,
"original_id": i,
}
num_lin_axes, num_ang_axes = self.joint_axis_dim[i]
start_ax = self.joint_axis_start[i]
for j in range(start_ax, start_ax + num_lin_axes + num_ang_axes):
data["axes"].append(
{
"axis": self.joint_axis[j],
"axis_mode": self.joint_axis_mode[j],
"target_ke": self.joint_target_ke[j],
"target_kd": self.joint_target_kd[j],
"limit_ke": self.joint_limit_ke[j],
"limit_kd": self.joint_limit_kd[j],
"limit_lower": self.joint_limit_lower[j],
"limit_upper": self.joint_limit_upper[j],
}
)
joint_data[(parent, child)] = data
# sort body children so we traverse the tree in the same order as the bodies are listed
for children in body_children.values():
children.sort(key=lambda x: body_data[x]["original_id"])
retained_joints = []
retained_bodies = []
body_remap = {-1: -1}
# depth first search over the joint graph
def dfs(parent_body: int, child_body: int, incoming_xform: wp.transform, last_dynamic_body: int):
nonlocal visited
nonlocal retained_joints
nonlocal retained_bodies
nonlocal body_data
nonlocal body_remap
joint = joint_data[(parent_body, child_body)]
if joint["type"] == JOINT_FIXED:
joint_xform = joint["parent_xform"] * wp.transform_inverse(joint["child_xform"])
incoming_xform = incoming_xform * joint_xform
parent_name = self.body_name[parent_body] if parent_body > -1 else "world"
child_name = self.body_name[child_body]
last_dynamic_body_name = self.body_name[last_dynamic_body] if last_dynamic_body > -1 else "world"
if verbose:
print(
f'Remove fixed joint {joint["name"]} between {parent_name} and {child_name}, '
f"merging {child_name} into {last_dynamic_body_name}"
)
child_id = body_data[child_body]["original_id"]
for shape in self.body_shapes[child_id]:
self.shape_transform[shape] = incoming_xform * self.shape_transform[shape]
if verbose:
print(
f" Shape {shape} moved to body {last_dynamic_body_name} with transform {self.shape_transform[shape]}"
)
if last_dynamic_body > -1:
self.shape_body[shape] = body_data[last_dynamic_body]["id"]
# add inertia to last_dynamic_body
m = body_data[child_body]["mass"]
com = body_data[child_body]["com"]
inertia = body_data[child_body]["inertia"]
body_data[last_dynamic_body]["inertia"] += wp.sim.transform_inertia(
m, inertia, incoming_xform.p, incoming_xform.q
)
body_data[last_dynamic_body]["mass"] += m
source_m = body_data[last_dynamic_body]["mass"]
source_com = body_data[last_dynamic_body]["com"]
body_data[last_dynamic_body]["com"] = (m * com + source_m * source_com) / (m + source_m)
body_data[last_dynamic_body]["shapes"].append(shape)
# indicate to recompute inverse mass, inertia for this body
body_data[last_dynamic_body]["inv_mass"] = None
else:
self.shape_body[shape] = -1
else:
joint["parent_xform"] = incoming_xform * joint["parent_xform"]
joint["parent"] = last_dynamic_body
last_dynamic_body = child_body
incoming_xform = wp.transform()
retained_joints.append(joint)
new_id = len(retained_bodies)
body_data[child_body]["id"] = new_id
retained_bodies.append(child_body)
for shape in body_data[child_body]["shapes"]:
self.shape_body[shape] = new_id
visited[parent_body] = True
if visited[child_body] or child_body not in body_children:
return
for child in body_children[child_body]:
if not visited[child]:
dfs(child_body, child, incoming_xform, last_dynamic_body)
for body in body_children[-1]:
if not visited[body]:
dfs(-1, body, wp.transform(), -1)
# repopulate the model
self.body_name.clear()
self.body_q.clear()
self.body_qd.clear()
self.body_mass.clear()
self.body_inertia.clear()
self.body_com.clear()
self.body_inv_mass.clear()
self.body_inv_inertia.clear()
self.body_shapes.clear()
for i in retained_bodies:
body = body_data[i]
new_id = len(self.body_name)
body_remap[body["original_id"]] = new_id
self.body_name.append(body["name"])
self.body_q.append(list(body["q"]))
self.body_qd.append(list(body["qd"]))
m = body["mass"]
inertia = body["inertia"]
self.body_mass.append(m)
self.body_inertia.append(inertia)
self.body_com.append(body["com"])
if body["inv_mass"] is None:
# recompute inverse mass and inertia
if m > 0.0:
self.body_inv_mass.append(1.0 / m)
self.body_inv_inertia.append(wp.inverse(inertia))
else:
self.body_inv_mass.append(0.0)
self.body_inv_inertia.append(wp.mat33(0.0))
else:
self.body_inv_mass.append(body["inv_mass"])
self.body_inv_inertia.append(body["inv_inertia"])
self.body_shapes[new_id] = body["shapes"]
body_remap[body["original_id"]] = new_id
# sort joints so they appear in the same order as before
retained_joints.sort(key=lambda x: x["original_id"])
self.joint_name.clear()
self.joint_type.clear()
self.joint_parent.clear()
self.joint_child.clear()
self.joint_q.clear()
self.joint_qd.clear()
self.joint_q_start.clear()
self.joint_qd_start.clear()
self.joint_enabled.clear()
self.joint_linear_compliance.clear()
self.joint_angular_compliance.clear()
self.joint_armature.clear()
self.joint_X_p.clear()
self.joint_X_c.clear()
self.joint_axis.clear()
self.joint_axis_mode.clear()
self.joint_target_ke.clear()
self.joint_target_kd.clear()
self.joint_limit_lower.clear()
self.joint_limit_upper.clear()
self.joint_limit_ke.clear()
self.joint_limit_kd.clear()
self.joint_axis_dim.clear()
self.joint_axis_start.clear()
self.joint_act.clear()
for joint in retained_joints:
self.joint_name.append(joint["name"])
self.joint_type.append(joint["type"])
self.joint_parent.append(body_remap[joint["parent"]])
self.joint_child.append(body_remap[joint["child"]])
self.joint_q_start.append(len(self.joint_q))
self.joint_qd_start.append(len(self.joint_qd))
self.joint_q.extend(joint["q"])
self.joint_qd.extend(joint["qd"])
self.joint_act.extend(joint["act"])
self.joint_armature.extend(joint["armature"])
self.joint_enabled.append(joint["enabled"])
self.joint_linear_compliance.append(joint["linear_compliance"])
self.joint_angular_compliance.append(joint["angular_compliance"])
self.joint_X_p.append(list(joint["parent_xform"]))
self.joint_X_c.append(list(joint["child_xform"]))
self.joint_axis_dim.append(joint["axis_dim"])
self.joint_axis_start.append(len(self.joint_axis))
for axis in joint["axes"]:
self.joint_axis.append(axis["axis"])
self.joint_axis_mode.append(axis["axis_mode"])
self.joint_target_ke.append(axis["target_ke"])
self.joint_target_kd.append(axis["target_kd"])
self.joint_limit_lower.append(axis["limit_lower"])
self.joint_limit_upper.append(axis["limit_upper"])
self.joint_limit_ke.append(axis["limit_ke"])
self.joint_limit_kd.append(axis["limit_kd"])
# muscles
def add_muscle(
self, bodies: List[int], positions: List[Vec3], f0: float, lm: float, lt: float, lmax: float, pen: float
) -> int:
"""Adds a muscle-tendon activation unit.
Args:
bodies: A list of body indices for each waypoint
positions: A list of positions of each waypoint in the body's local frame
f0: Force scaling
lm: Muscle length
lt: Tendon length
lmax: Maximally efficient muscle length
Returns:
The index of the muscle in the model
.. note:: The simulation support for muscles is in progress and not yet fully functional.
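Example (an illustrative sketch; ``body_a`` and ``body_b`` are assumed to be body indices returned by :meth:`add_body`, and the parameter values are placeholders):
.. code-block:: python
muscle = builder.add_muscle(
bodies=[body_a, body_b],
positions=[(0.0, 0.1, 0.0), (0.0, -0.1, 0.0)],
f0=100.0, lm=0.3, lt=0.1, lmax=0.5, pen=0.0,
)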
"""
n = len(bodies)
self.muscle_start.append(len(self.muscle_bodies))
self.muscle_params.append((f0, lm, lt, lmax, pen))
self.muscle_activations.append(0.0)
for i in range(n):
self.muscle_bodies.append(bodies[i])
self.muscle_points.append(positions[i])
# return the index of the muscle
return len(self.muscle_start) - 1
# shapes
def add_shape_plane(
self,
plane: Vec4 = (0.0, 1.0, 0.0, 0.0),
pos: Vec3 = None,
rot: Quat = None,
width: float = 10.0,
length: float = 10.0,
body: int = -1,
ke: float = None,
kd: float = None,
kf: float = None,
ka: float = None,
mu: float = None,
restitution: float = None,
thickness: float = None,
has_ground_collision: bool = False,
has_shape_collision: bool = True,
is_visible: bool = True,
collision_group: int = -1,
):
"""
Adds a plane collision shape.
If pos and rot are defined, the plane is assumed to have its normal as (0, 1, 0).
Otherwise, the plane equation defined through the `plane` argument is used.
Args:
plane: The plane equation in form a*x + b*y + c*z + d = 0
pos: The position of the plane in world coordinates
rot: The rotation of the plane in world coordinates
width: The extent along x of the plane (infinite if 0)
length: The extent along z of the plane (infinite if 0)
body: The body index to attach the shape to (-1 by default to keep the plane static)
ke: The contact elastic stiffness (None to use the default value :attr:`default_shape_ke`)
kd: The contact damping stiffness (None to use the default value :attr:`default_shape_kd`)
kf: The contact friction stiffness (None to use the default value :attr:`default_shape_kf`)
ka: The contact adhesion distance (None to use the default value :attr:`default_shape_ka`)
mu: The coefficient of friction (None to use the default value :attr:`default_shape_mu`)
restitution: The coefficient of restitution (None to use the default value :attr:`default_shape_restitution`)
thickness: The thickness of the plane (0 by default) for collision handling (None to use the default value :attr:`default_shape_thickness`)
has_ground_collision: If True, the shape will collide with the ground plane if `Model.ground` is True
has_shape_collision: If True, the shape will collide with other shapes
is_visible: Whether the plane is visible
collision_group: The collision group of the shape
Returns:
The index of the added shape
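Example (an illustrative sketch; adds an infinite static plane with normal +y at height 0, width/length of 0 meaning infinite extent):
.. code-block:: python
builder.add_shape_plane(plane=(0.0, 1.0, 0.0, 0.0), width=0.0, length=0.0)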
"""
if pos is None or rot is None:
# compute position and rotation from plane equation
normal = np.array(plane[:3])
normal /= np.linalg.norm(normal)
pos = plane[3] * normal
if np.allclose(normal, (0.0, 1.0, 0.0)):
# no rotation necessary
rot = (0.0, 0.0, 0.0, 1.0)
else:
c = np.cross(normal, (0.0, 1.0, 0.0))
angle = np.arcsin(np.linalg.norm(c))
axis = np.abs(c) / np.linalg.norm(c)
rot = wp.quat_from_axis_angle(axis, angle)
scale = wp.vec3(width, length, 0.0)
return self._add_shape(
body,
pos,
rot,
GEO_PLANE,
scale,
None,
0.0,
ke,
kd,
kf,
ka,
mu,
restitution,
thickness,
has_ground_collision=has_ground_collision,
has_shape_collision=has_shape_collision,
is_visible=is_visible,
collision_group=collision_group,
)
def add_shape_sphere(
self,
body,
pos: Vec3 = (0.0, 0.0, 0.0),
rot: Quat = (0.0, 0.0, 0.0, 1.0),
radius: float = 1.0,
density: float = None,
ke: float = None,
kd: float = None,
kf: float = None,
ka: float = None,
mu: float = None,
restitution: float = None,
is_solid: bool = True,
thickness: float = None,
has_ground_collision: bool = True,
has_shape_collision: bool = True,
collision_group: int = -1,
is_visible: bool = True,
):
"""Adds a sphere collision shape to a body.
Args:
body: The index of the parent body this shape belongs to (use -1 for static shapes)
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the sphere
density: The density of the shape (None to use the default value :attr:`default_shape_density`)
ke: The contact elastic stiffness (None to use the default value :attr:`default_shape_ke`)
kd: The contact damping stiffness (None to use the default value :attr:`default_shape_kd`)
kf: The contact friction stiffness (None to use the default value :attr:`default_shape_kf`)
ka: The contact adhesion distance (None to use the default value :attr:`default_shape_ka`)
mu: The coefficient of friction (None to use the default value :attr:`default_shape_mu`)
restitution: The coefficient of restitution (None to use the default value :attr:`default_shape_restitution`)
is_solid: Whether the sphere is solid or hollow
thickness: Thickness to use for computing inertia of a hollow sphere, and for collision handling (None to use the default value :attr:`default_shape_thickness`)
has_ground_collision: If True, the shape will collide with the ground plane if `Model.ground` is True
has_shape_collision: If True, the shape will collide with other shapes
collision_group: The collision group of the shape
is_visible: Whether the sphere is visible
Returns:
The index of the added shape
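Example (an illustrative sketch; ``body`` is assumed to be a body index returned by :meth:`add_body`):
.. code-block:: python
builder.add_shape_sphere(body, pos=(0.0, 0.2, 0.0), radius=0.1, density=1000.0)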
"""
thickness = self.default_shape_thickness if thickness is None else thickness
return self._add_shape(
body,
wp.vec3(pos),
wp.quat(rot),
GEO_SPHERE,
wp.vec3(radius, 0.0, 0.0),
None,
density,
ke,
kd,
kf,
ka,
mu,
restitution,
thickness + radius,
is_solid,
has_ground_collision=has_ground_collision,
has_shape_collision=has_shape_collision,
collision_group=collision_group,
is_visible=is_visible,
)
def add_shape_box(
self,
body: int,
pos: Vec3 = (0.0, 0.0, 0.0),
rot: Quat = (0.0, 0.0, 0.0, 1.0),
hx: float = 0.5,
hy: float = 0.5,
hz: float = 0.5,
density: float = None,
ke: float = None,
kd: float = None,
kf: float = None,
ka: float = None,
mu: float = None,
restitution: float = None,
is_solid: bool = True,
thickness: float = None,
has_ground_collision: bool = True,
has_shape_collision: bool = True,
collision_group: int = -1,
is_visible: bool = True,
):
"""Adds a box collision shape to a body.
Args:
body: The index of the parent body this shape belongs to (use -1 for static shapes)
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
hx: The half-extent along the x-axis
hy: The half-extent along the y-axis
hz: The half-extent along the z-axis
density: The density of the shape (None to use the default value :attr:`default_shape_density`)
ke: The contact elastic stiffness (None to use the default value :attr:`default_shape_ke`)
kd: The contact damping stiffness (None to use the default value :attr:`default_shape_kd`)
kf: The contact friction stiffness (None to use the default value :attr:`default_shape_kf`)
ka: The contact adhesion distance (None to use the default value :attr:`default_shape_ka`)
mu: The coefficient of friction (None to use the default value :attr:`default_shape_mu`)
restitution: The coefficient of restitution (None to use the default value :attr:`default_shape_restitution`)
is_solid: Whether the box is solid or hollow
thickness: Thickness to use for computing inertia of a hollow box, and for collision handling (None to use the default value :attr:`default_shape_thickness`)
has_ground_collision: If True, the shape will collide with the ground plane if `Model.ground` is True
has_shape_collision: If True, the shape will collide with other shapes
collision_group: The collision group of the shape
is_visible: Whether the box is visible
Returns:
The index of the added shape
"""
return self._add_shape(
body,
wp.vec3(pos),
wp.quat(rot),
GEO_BOX,
wp.vec3(hx, hy, hz),
None,
density,
ke,
kd,
kf,
ka,
mu,
restitution,
thickness,
is_solid,
has_ground_collision=has_ground_collision,
has_shape_collision=has_shape_collision,
collision_group=collision_group,
is_visible=is_visible,
)
def add_shape_capsule(
self,
body: int,
pos: Vec3 = (0.0, 0.0, 0.0),
rot: Quat = (0.0, 0.0, 0.0, 1.0),
radius: float = 1.0,
half_height: float = 0.5,
up_axis: int = 1,
density: float = None,
ke: float = None,
kd: float = None,
kf: float = None,
ka: float = None,
mu: float = None,
restitution: float = None,
is_solid: bool = True,
thickness: float = None,
has_ground_collision: bool = True,
has_shape_collision: bool = True,
collision_group: int = -1,
is_visible: bool = True,
):
"""Adds a capsule collision shape to a body.
Args:
body: The index of the parent body this shape belongs to (use -1 for static shapes)
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the capsule
half_height: The half length of the center cylinder along the up axis
up_axis: The axis along which the capsule is aligned (0=x, 1=y, 2=z)
density: The density of the shape (None to use the default value :attr:`default_shape_density`)
ke: The contact elastic stiffness (None to use the default value :attr:`default_shape_ke`)
kd: The contact damping stiffness (None to use the default value :attr:`default_shape_kd`)
kf: The contact friction stiffness (None to use the default value :attr:`default_shape_kf`)
ka: The contact adhesion distance (None to use the default value :attr:`default_shape_ka`)
mu: The coefficient of friction (None to use the default value :attr:`default_shape_mu`)
restitution: The coefficient of restitution (None to use the default value :attr:`default_shape_restitution`)
is_solid: Whether the capsule is solid or hollow
thickness: Thickness to use for computing inertia of a hollow capsule, and for collision handling (None to use the default value :attr:`default_shape_thickness`)
has_ground_collision: If True, the shape will collide with the ground plane if `Model.ground` is True
has_shape_collision: If True, the shape will collide with other shapes
collision_group: The collision group of the shape
is_visible: Whether the capsule is visible
Returns:
The index of the added shape
"""
q = wp.quat(rot)
sqh = math.sqrt(0.5)
if up_axis == 0:
q = wp.mul(q, wp.quat(0.0, 0.0, -sqh, sqh))
elif up_axis == 2:
q = wp.mul(q, wp.quat(sqh, 0.0, 0.0, sqh))
thickness = self.default_shape_thickness if thickness is None else thickness
return self._add_shape(
body,
wp.vec3(pos),
wp.quat(q),
GEO_CAPSULE,
wp.vec3(radius, half_height, 0.0),
None,
density,
ke,
kd,
kf,
ka,
mu,
restitution,
thickness + radius,
is_solid,
has_ground_collision=has_ground_collision,
has_shape_collision=has_shape_collision,
collision_group=collision_group,
is_visible=is_visible,
)
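    # Illustrative usage (a sketch, reusing `builder` and body `b` from the sphere
    # example above): up_axis=0 or 2 pre-rotates the shape so that its local y-axis
    # maps onto the requested axis.
    #
    #   builder.add_shape_capsule(body=b, radius=0.1, half_height=0.3, up_axis=0)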
def add_shape_cylinder(
self,
body: int,
pos: Vec3 = (0.0, 0.0, 0.0),
rot: Quat = (0.0, 0.0, 0.0, 1.0),
radius: float = 1.0,
half_height: float = 0.5,
up_axis: int = 1,
density: float = None,
ke: float = None,
kd: float = None,
kf: float = None,
ka: float = None,
mu: float = None,
restitution: float = None,
is_solid: bool = True,
thickness: float = None,
has_ground_collision: bool = True,
has_shape_collision: bool = True,
collision_group: int = -1,
is_visible: bool = True,
):
"""Adds a cylinder collision shape to a body.
Args:
body: The index of the parent body this shape belongs to (use -1 for static shapes)
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the cylinder
half_height: The half length of the cylinder along the up axis
up_axis: The axis along which the cylinder is aligned (0=x, 1=y, 2=z)
density: The density of the shape (None to use the default value :attr:`default_shape_density`)
ke: The contact elastic stiffness (None to use the default value :attr:`default_shape_ke`)
kd: The contact damping stiffness (None to use the default value :attr:`default_shape_kd`)
kf: The contact friction stiffness (None to use the default value :attr:`default_shape_kf`)
ka: The contact adhesion distance (None to use the default value :attr:`default_shape_ka`)
mu: The coefficient of friction (None to use the default value :attr:`default_shape_mu`)
restitution: The coefficient of restitution (None to use the default value :attr:`default_shape_restitution`)
is_solid: Whether the cylinder is solid or hollow
thickness: Thickness to use for computing inertia of a hollow cylinder, and for collision handling (None to use the default value :attr:`default_shape_thickness`)
has_ground_collision: If True, the shape will collide with the ground plane if `Model.ground` is True
has_shape_collision: If True, the shape will collide with other shapes
collision_group: The collision group of the shape
is_visible: Whether the cylinder is visible
Note:
Cylinders are currently not supported in rigid body collision handling.
Returns:
The index of the added shape
"""
        q = wp.quat(rot)
        sqh = math.sqrt(0.5)
        if up_axis == 0:
            q = wp.mul(q, wp.quat(0.0, 0.0, -sqh, sqh))
        elif up_axis == 2:
            q = wp.mul(q, wp.quat(sqh, 0.0, 0.0, sqh))
return self._add_shape(
body,
wp.vec3(pos),
wp.quat(q),
GEO_CYLINDER,
wp.vec3(radius, half_height, 0.0),
None,
density,
ke,
kd,
kf,
ka,
mu,
restitution,
thickness,
is_solid,
has_ground_collision=has_ground_collision,
has_shape_collision=has_shape_collision,
collision_group=collision_group,
is_visible=is_visible,
)
def add_shape_cone(
self,
body: int,
pos: Vec3 = (0.0, 0.0, 0.0),
rot: Quat = (0.0, 0.0, 0.0, 1.0),
radius: float = 1.0,
half_height: float = 0.5,
up_axis: int = 1,
density: float = None,
ke: float = None,
kd: float = None,
kf: float = None,
ka: float = None,
mu: float = None,
restitution: float = None,
is_solid: bool = True,
thickness: float = None,
has_ground_collision: bool = True,
has_shape_collision: bool = True,
collision_group: int = -1,
is_visible: bool = True,
):
"""Adds a cone collision shape to a body.
Args:
body: The index of the parent body this shape belongs to (use -1 for static shapes)
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the cone
half_height: The half length of the cone along the up axis
up_axis: The axis along which the cone is aligned (0=x, 1=y, 2=z)
density: The density of the shape (None to use the default value :attr:`default_shape_density`)
ke: The contact elastic stiffness (None to use the default value :attr:`default_shape_ke`)
kd: The contact damping stiffness (None to use the default value :attr:`default_shape_kd`)
kf: The contact friction stiffness (None to use the default value :attr:`default_shape_kf`)
ka: The contact adhesion distance (None to use the default value :attr:`default_shape_ka`)
mu: The coefficient of friction (None to use the default value :attr:`default_shape_mu`)
restitution: The coefficient of restitution (None to use the default value :attr:`default_shape_restitution`)
is_solid: Whether the cone is solid or hollow
thickness: Thickness to use for computing inertia of a hollow cone, and for collision handling (None to use the default value :attr:`default_shape_thickness`)
has_ground_collision: If True, the shape will collide with the ground plane if `Model.ground` is True
has_shape_collision: If True, the shape will collide with other shapes
collision_group: The collision group of the shape
is_visible: Whether the cone is visible
Note:
Cones are currently not supported in rigid body collision handling.
Returns:
The index of the added shape
"""
        q = wp.quat(rot)
        sqh = math.sqrt(0.5)
        if up_axis == 0:
            q = wp.mul(q, wp.quat(0.0, 0.0, -sqh, sqh))
        elif up_axis == 2:
            q = wp.mul(q, wp.quat(sqh, 0.0, 0.0, sqh))
return self._add_shape(
body,
wp.vec3(pos),
wp.quat(q),
GEO_CONE,
wp.vec3(radius, half_height, 0.0),
None,
density,
ke,
kd,
kf,
ka,
mu,
restitution,
thickness,
is_solid,
has_ground_collision=has_ground_collision,
has_shape_collision=has_shape_collision,
collision_group=collision_group,
is_visible=is_visible,
)
def add_shape_mesh(
self,
body: int,
pos: Optional[Vec3] = None,
rot: Optional[Quat] = None,
mesh: Optional[Mesh] = None,
scale: Optional[Vec3] = None,
density: float = None,
ke: float = None,
kd: float = None,
kf: float = None,
ka: float = None,
mu: float = None,
restitution: float = None,
is_solid: bool = True,
thickness: float = None,
has_ground_collision: bool = True,
has_shape_collision: bool = True,
collision_group: int = -1,
is_visible: bool = True,
):
"""Adds a triangle mesh collision shape to a body.
Args:
body: The index of the parent body this shape belongs to (use -1 for static shapes)
pos: The location of the shape with respect to the parent frame
(None to use the default value ``wp.vec3(0.0, 0.0, 0.0)``)
rot: The rotation of the shape with respect to the parent frame
(None to use the default value ``wp.quat(0.0, 0.0, 0.0, 1.0)``)
mesh: The mesh object
scale: Scale to use for the collider. (None to use the default value ``wp.vec3(1.0, 1.0, 1.0)``)
density: The density of the shape (None to use the default value :attr:`default_shape_density`)
ke: The contact elastic stiffness (None to use the default value :attr:`default_shape_ke`)
kd: The contact damping stiffness (None to use the default value :attr:`default_shape_kd`)
kf: The contact friction stiffness (None to use the default value :attr:`default_shape_kf`)
ka: The contact adhesion distance (None to use the default value :attr:`default_shape_ka`)
mu: The coefficient of friction (None to use the default value :attr:`default_shape_mu`)
restitution: The coefficient of restitution (None to use the default value :attr:`default_shape_restitution`)
is_solid: If True, the mesh is solid, otherwise it is a hollow surface with the given wall thickness
thickness: Thickness to use for computing inertia of a hollow mesh, and for collision handling (None to use the default value :attr:`default_shape_thickness`)
has_ground_collision: If True, the shape will collide with the ground plane if `Model.ground` is True
has_shape_collision: If True, the shape will collide with other shapes
collision_group: The collision group of the shape
is_visible: Whether the mesh is visible
Returns:
The index of the added shape
"""
if pos is None:
pos = wp.vec3(0.0, 0.0, 0.0)
if rot is None:
rot = wp.quat(0.0, 0.0, 0.0, 1.0)
if scale is None:
scale = wp.vec3(1.0, 1.0, 1.0)
return self._add_shape(
body,
pos,
rot,
GEO_MESH,
wp.vec3(scale[0], scale[1], scale[2]),
mesh,
density,
ke,
kd,
kf,
ka,
mu,
restitution,
thickness,
is_solid,
has_ground_collision=has_ground_collision,
has_shape_collision=has_shape_collision,
collision_group=collision_group,
is_visible=is_visible,
)
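    # Illustrative usage (a sketch; assumes the Mesh wrapper defined in this module
    # accepts a list of vertex positions and a flat list of triangle indices):
    #
    #   points = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    #   tris = [0, 2, 1, 0, 1, 3, 0, 3, 2, 1, 2, 3]
    #   builder.add_shape_mesh(body=b, mesh=Mesh(points, tris), density=500.0)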
def add_shape_sdf(
self,
body: int,
pos: Vec3 = (0.0, 0.0, 0.0),
rot: Quat = (0.0, 0.0, 0.0, 1.0),
sdf: SDF = None,
scale: Vec3 = (1.0, 1.0, 1.0),
density: float = None,
ke: float = None,
kd: float = None,
kf: float = None,
ka: float = None,
mu: float = None,
restitution: float = None,
is_solid: bool = True,
thickness: float = None,
has_ground_collision: bool = True,
has_shape_collision: bool = True,
collision_group: int = -1,
is_visible: bool = True,
):
"""Adds SDF collision shape to a body.
Args:
body: The index of the parent body this shape belongs to (use -1 for static shapes)
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
sdf: The sdf object
scale: Scale to use for the collider
density: The density of the shape (None to use the default value :attr:`default_shape_density`)
ke: The contact elastic stiffness (None to use the default value :attr:`default_shape_ke`)
kd: The contact damping stiffness (None to use the default value :attr:`default_shape_kd`)
kf: The contact friction stiffness (None to use the default value :attr:`default_shape_kf`)
ka: The contact adhesion distance (None to use the default value :attr:`default_shape_ka`)
mu: The coefficient of friction (None to use the default value :attr:`default_shape_mu`)
restitution: The coefficient of restitution (None to use the default value :attr:`default_shape_restitution`)
is_solid: If True, the SDF is solid, otherwise it is a hollow surface with the given wall thickness
thickness: Thickness to use for collision handling (None to use the default value :attr:`default_shape_thickness`)
has_ground_collision: If True, the shape will collide with the ground plane if `Model.ground` is True
has_shape_collision: If True, the shape will collide with other shapes
collision_group: The collision group of the shape
is_visible: Whether the shape is visible
Returns:
The index of the added shape
"""
return self._add_shape(
body,
wp.vec3(pos),
wp.quat(rot),
GEO_SDF,
wp.vec3(scale[0], scale[1], scale[2]),
sdf,
density,
ke,
kd,
kf,
ka,
mu,
restitution,
thickness,
is_solid,
has_ground_collision=has_ground_collision,
has_shape_collision=has_shape_collision,
collision_group=collision_group,
is_visible=is_visible,
)
def _shape_radius(self, type, scale, src):
"""
Calculates the radius of a sphere that encloses the shape, used for broadphase collision detection.
"""
if type == GEO_SPHERE:
return scale[0]
elif type == GEO_BOX:
return np.linalg.norm(scale)
elif type == GEO_CAPSULE or type == GEO_CYLINDER or type == GEO_CONE:
return scale[0] + scale[1]
elif type == GEO_MESH:
vmax = np.max(np.abs(src.vertices), axis=0) * np.max(scale)
return np.linalg.norm(vmax)
elif type == GEO_PLANE:
if scale[0] > 0.0 and scale[1] > 0.0:
# finite plane
return np.linalg.norm(scale)
else:
return 1.0e6
else:
return 10.0
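    # Example values of the enclosing radius computed above (illustrative): a box
    # with half-extents (0.5, 0.5, 0.5) yields sqrt(0.75) ~= 0.866, while a capsule
    # with radius 0.1 and half_height 0.3 yields 0.1 + 0.3 = 0.4.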
def _add_shape(
self,
body,
pos,
rot,
type,
scale,
src=None,
density=None,
ke=None,
kd=None,
kf=None,
ka=None,
mu=None,
restitution=None,
thickness=None,
is_solid=True,
collision_group=-1,
collision_filter_parent=True,
has_ground_collision=True,
has_shape_collision=True,
is_visible=True,
):
self.shape_body.append(body)
shape = self.shape_count
if body in self.body_shapes:
# no contacts between shapes of the same body
for same_body_shape in self.body_shapes[body]:
self.shape_collision_filter_pairs.add((same_body_shape, shape))
self.body_shapes[body].append(shape)
else:
self.body_shapes[body] = [shape]
ke = ke if ke is not None else self.default_shape_ke
kd = kd if kd is not None else self.default_shape_kd
kf = kf if kf is not None else self.default_shape_kf
ka = ka if ka is not None else self.default_shape_ka
mu = mu if mu is not None else self.default_shape_mu
restitution = restitution if restitution is not None else self.default_shape_restitution
thickness = thickness if thickness is not None else self.default_shape_thickness
density = density if density is not None else self.default_shape_density
self.shape_transform.append(wp.transform(pos, rot))
self.shape_visible.append(is_visible)
self.shape_geo_type.append(type)
self.shape_geo_scale.append((scale[0], scale[1], scale[2]))
self.shape_geo_src.append(src)
self.shape_geo_thickness.append(thickness)
self.shape_geo_is_solid.append(is_solid)
self.shape_material_ke.append(ke)
self.shape_material_kd.append(kd)
self.shape_material_kf.append(kf)
self.shape_material_ka.append(ka)
self.shape_material_mu.append(mu)
self.shape_material_restitution.append(restitution)
self.shape_collision_group.append(collision_group)
if collision_group not in self.shape_collision_group_map:
self.shape_collision_group_map[collision_group] = []
self.last_collision_group = max(self.last_collision_group, collision_group)
self.shape_collision_group_map[collision_group].append(shape)
self.shape_collision_radius.append(self._shape_radius(type, scale, src))
if collision_filter_parent and body > -1 and body in self.joint_parents:
for parent_body in self.joint_parents[body]:
if parent_body > -1:
for parent_shape in self.body_shapes[parent_body]:
self.shape_collision_filter_pairs.add((parent_shape, shape))
if body == -1:
has_ground_collision = False
self.shape_ground_collision.append(has_ground_collision)
self.shape_shape_collision.append(has_shape_collision)
(m, c, I) = compute_shape_mass(type, scale, src, density, is_solid, thickness)
self._update_body_mass(body, m, I, pos + c, rot)
return shape
# particles
def add_particle(
self, pos: Vec3, vel: Vec3, mass: float, radius: float = None, flags: wp.uint32 = PARTICLE_FLAG_ACTIVE
) -> int:
"""Adds a single particle to the model
Args:
pos: The initial position of the particle
vel: The initial velocity of the particle
mass: The mass of the particle
radius: The radius of the particle used in collision handling. If None, the radius is set to the default value (:attr:`default_particle_radius`).
flags: The flags that control the dynamical behavior of the particle, see PARTICLE_FLAG_* constants
Note:
            Set the mass equal to zero to create a 'kinematic' particle that is not subject to dynamics.
Returns:
The index of the particle in the system
"""
self.particle_q.append(pos)
self.particle_qd.append(vel)
self.particle_mass.append(mass)
if radius is None:
radius = self.default_particle_radius
self.particle_radius.append(radius)
self.particle_flags.append(flags)
return len(self.particle_q) - 1
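    # Illustrative usage (a sketch, not from the original source): one dynamic
    # particle and one zero-mass (kinematic) anchor.
    #
    #   p = builder.add_particle(pos=(0.0, 1.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.1)
    #   anchor = builder.add_particle(pos=(0.0, 2.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)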
def add_spring(self, i: int, j, ke: float, kd: float, control: float):
"""Adds a spring between two particles in the system
Args:
i: The index of the first particle
j: The index of the second particle
ke: The elastic stiffness of the spring
kd: The damping stiffness of the spring
control: The actuation level of the spring
Note:
The spring is created with a rest-length based on the distance
between the particles in their initial configuration.
"""
self.spring_indices.append(i)
self.spring_indices.append(j)
self.spring_stiffness.append(ke)
self.spring_damping.append(kd)
self.spring_control.append(control)
# compute rest length
p = self.particle_q[i]
q = self.particle_q[j]
delta = np.subtract(p, q)
l = np.sqrt(np.dot(delta, delta))
self.spring_rest_length.append(l)
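    # Illustrative usage (continuing the add_particle sketch above): the rest length
    # is taken from the current particle positions, here 1.0.
    #
    #   builder.add_spring(anchor, p, ke=1.0e3, kd=10.0, control=0.0)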
def add_triangle(
self,
i: int,
j: int,
k: int,
tri_ke: float = default_tri_ke,
tri_ka: float = default_tri_ka,
tri_kd: float = default_tri_kd,
tri_drag: float = default_tri_drag,
tri_lift: float = default_tri_lift,
) -> float:
"""Adds a triangular FEM element between three particles in the system.
Triangles are modeled as viscoelastic elements with elastic stiffness and damping
parameters specified on the model. See model.tri_ke, model.tri_kd.
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
Return:
The area of the triangle
Note:
The triangle is created with a rest-length based on the distance
between the particles in their initial configuration.
"""
# TODO: Expose elastic parameters on a per-element basis
# compute basis for 2D rest pose
p = self.particle_q[i]
q = self.particle_q[j]
r = self.particle_q[k]
qp = q - p
rp = r - p
# construct basis aligned with the triangle
n = wp.normalize(wp.cross(qp, rp))
e1 = wp.normalize(qp)
e2 = wp.normalize(wp.cross(n, e1))
R = np.array((e1, e2))
M = np.array((qp, rp))
D = R @ M.T
area = np.linalg.det(D) / 2.0
if area <= 0.0:
print("inverted or degenerate triangle element")
return 0.0
else:
inv_D = np.linalg.inv(D)
self.tri_indices.append((i, j, k))
self.tri_poses.append(inv_D.tolist())
self.tri_activations.append(0.0)
self.tri_materials.append((tri_ke, tri_ka, tri_kd, tri_drag, tri_lift))
return area
def add_triangles(
self,
i: List[int],
j: List[int],
k: List[int],
tri_ke: Optional[List[float]] = None,
tri_ka: Optional[List[float]] = None,
tri_kd: Optional[List[float]] = None,
tri_drag: Optional[List[float]] = None,
tri_lift: Optional[List[float]] = None,
) -> List[float]:
"""Adds triangular FEM elements between groups of three particles in the system.
Triangles are modeled as viscoelastic elements with elastic stiffness and damping
        parameters specified on the model. See model.tri_ke, model.tri_kd.
Args:
i: The indices of the first particle
j: The indices of the second particle
k: The indices of the third particle
Return:
The areas of the triangles
Note:
A triangle is created with a rest-length based on the distance
between the particles in their initial configuration.
"""
# compute basis for 2D rest pose
p = np.array(self.particle_q)[i]
q = np.array(self.particle_q)[j]
r = np.array(self.particle_q)[k]
qp = q - p
rp = r - p
def normalized(a):
l = np.linalg.norm(a, axis=-1, keepdims=True)
l[l == 0] = 1.0
return a / l
n = normalized(np.cross(qp, rp))
e1 = normalized(qp)
e2 = normalized(np.cross(n, e1))
R = np.concatenate((e1[..., None], e2[..., None]), axis=-1)
M = np.concatenate((qp[..., None], rp[..., None]), axis=-1)
D = np.matmul(R.transpose(0, 2, 1), M)
areas = np.linalg.det(D) / 2.0
areas[areas < 0.0] = 0.0
valid_inds = (areas > 0.0).nonzero()[0]
if len(valid_inds) < len(areas):
print("inverted or degenerate triangle elements")
D[areas == 0.0] = np.eye(2)[None, ...]
inv_D = np.linalg.inv(D)
inds = np.concatenate((i[valid_inds, None], j[valid_inds, None], k[valid_inds, None]), axis=-1)
self.tri_indices.extend(inds.tolist())
self.tri_poses.extend(inv_D[valid_inds].tolist())
self.tri_activations.extend([0.0] * len(valid_inds))
def init_if_none(arr, defaultValue):
if arr is None:
return [defaultValue] * len(areas)
return arr
tri_ke = init_if_none(tri_ke, self.default_tri_ke)
tri_ka = init_if_none(tri_ka, self.default_tri_ka)
tri_kd = init_if_none(tri_kd, self.default_tri_kd)
tri_drag = init_if_none(tri_drag, self.default_tri_drag)
tri_lift = init_if_none(tri_lift, self.default_tri_lift)
self.tri_materials.extend(
zip(
np.array(tri_ke)[valid_inds],
np.array(tri_ka)[valid_inds],
np.array(tri_kd)[valid_inds],
np.array(tri_drag)[valid_inds],
np.array(tri_lift)[valid_inds],
)
)
return areas.tolist()
def add_tetrahedron(
self, i: int, j: int, k: int, l: int, k_mu: float = 1.0e3, k_lambda: float = 1.0e3, k_damp: float = 0.0
) -> float:
"""Adds a tetrahedral FEM element between four particles in the system.
Tetrahedra are modeled as viscoelastic elements with a NeoHookean energy
density based on [Smith et al. 2018].
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
l: The index of the fourth particle
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The element's damping stiffness
Return:
The volume of the tetrahedron
Note:
The tetrahedron is created with a rest-pose based on the particle's initial configuration
"""
        # compute basis for 3D rest pose
p = np.array(self.particle_q[i])
q = np.array(self.particle_q[j])
r = np.array(self.particle_q[k])
s = np.array(self.particle_q[l])
qp = q - p
rp = r - p
sp = s - p
Dm = np.array((qp, rp, sp)).T
volume = np.linalg.det(Dm) / 6.0
if volume <= 0.0:
print("inverted tetrahedral element")
else:
inv_Dm = np.linalg.inv(Dm)
self.tet_indices.append((i, j, k, l))
self.tet_poses.append(inv_Dm.tolist())
self.tet_activations.append(0.0)
self.tet_materials.append((k_mu, k_lambda, k_damp))
return volume
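    # Illustrative usage (a sketch, not from the original source): for the unit
    # tetrahedron below Dm is the identity, so the returned rest volume is 1/6.
    #
    #   i0 = builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    #   i1 = builder.add_particle((1.0, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    #   i2 = builder.add_particle((0.0, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
    #   i3 = builder.add_particle((0.0, 0.0, 1.0), (0.0, 0.0, 0.0), 1.0)
    #   vol = builder.add_tetrahedron(i0, i1, i2, i3, k_mu=1.0e4, k_lambda=1.0e4)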
def add_edge(
self,
i: int,
j: int,
k: int,
l: int,
rest: float = None,
edge_ke: float = default_edge_ke,
edge_kd: float = default_edge_kd,
):
"""Adds a bending edge element between four particles in the system.
        Bending elements are designed to be between two connected triangles. The
        bending energy is based on [Bridson et al. 2002]. Bending stiffness is controlled
        by the `edge_ke` parameter, with damping given by `edge_kd`.
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
l: The index of the fourth particle
rest: The rest angle across the edge in radians, if not specified it will be computed
Note:
The edge lies between the particles indexed by 'k' and 'l' parameters with the opposing
vertices indexed by 'i' and 'j'. This defines two connected triangles with counter clockwise
winding: (i, k, l), (j, l, k).
"""
# compute rest angle
if rest is None:
x1 = self.particle_q[i]
x2 = self.particle_q[j]
x3 = self.particle_q[k]
x4 = self.particle_q[l]
n1 = wp.normalize(wp.cross(x3 - x1, x4 - x1))
n2 = wp.normalize(wp.cross(x4 - x2, x3 - x2))
e = wp.normalize(x4 - x3)
d = np.clip(np.dot(n2, n1), -1.0, 1.0)
angle = math.acos(d)
sign = np.sign(np.dot(np.cross(n2, n1), e))
rest = angle * sign
self.edge_indices.append((i, j, k, l))
self.edge_rest_angle.append(rest)
self.edge_bending_properties.append((edge_ke, edge_kd))
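    # Illustrative usage (a sketch): for two triangles (i, k, l) and (j, l, k) that
    # share the edge (k, l), the rest angle can be measured from the current
    # configuration or forced to a flat rest state.
    #
    #   builder.add_edge(i, j, k, l)            # rest angle from current positions
    #   builder.add_edge(i, j, k, l, rest=0.0)  # flat rest configuration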
def add_edges(
self,
i,
j,
k,
l,
rest: Optional[List[float]] = None,
edge_ke: Optional[List[float]] = None,
edge_kd: Optional[List[float]] = None,
):
"""Adds bending edge elements between groups of four particles in the system.
        Bending elements are designed to be between two connected triangles. The
        bending energy is based on [Bridson et al. 2002]. Bending stiffness is controlled
        by the `edge_ke` parameter, with damping given by `edge_kd`.
Args:
i: The indices of the first particle
j: The indices of the second particle
k: The indices of the third particle
l: The indices of the fourth particle
rest: The rest angles across the edges in radians, if not specified they will be computed
Note:
The edge lies between the particles indexed by 'k' and 'l' parameters with the opposing
vertices indexed by 'i' and 'j'. This defines two connected triangles with counter clockwise
winding: (i, k, l), (j, l, k).
"""
if rest is None:
# compute rest angle
x1 = np.array(self.particle_q)[i]
x2 = np.array(self.particle_q)[j]
x3 = np.array(self.particle_q)[k]
x4 = np.array(self.particle_q)[l]
def normalized(a):
l = np.linalg.norm(a, axis=-1, keepdims=True)
l[l == 0] = 1.0
return a / l
n1 = normalized(np.cross(x3 - x1, x4 - x1))
n2 = normalized(np.cross(x4 - x2, x3 - x2))
e = normalized(x4 - x3)
def dot(a, b):
return (a * b).sum(axis=-1)
d = np.clip(dot(n2, n1), -1.0, 1.0)
angle = np.arccos(d)
sign = np.sign(dot(np.cross(n2, n1), e))
rest = angle * sign
inds = np.concatenate((i[:, None], j[:, None], k[:, None], l[:, None]), axis=-1)
self.edge_indices.extend(inds.tolist())
self.edge_rest_angle.extend(rest.tolist())
def init_if_none(arr, defaultValue):
if arr is None:
return [defaultValue] * len(i)
return arr
edge_ke = init_if_none(edge_ke, self.default_edge_ke)
edge_kd = init_if_none(edge_kd, self.default_edge_kd)
self.edge_bending_properties.extend(zip(edge_ke, edge_kd))
def add_cloth_grid(
self,
pos: Vec3,
rot: Quat,
vel: Vec3,
dim_x: int,
dim_y: int,
cell_x: float,
cell_y: float,
mass: float,
reverse_winding: bool = False,
fix_left: bool = False,
fix_right: bool = False,
fix_top: bool = False,
fix_bottom: bool = False,
tri_ke: float = default_tri_ke,
tri_ka: float = default_tri_ka,
tri_kd: float = default_tri_kd,
tri_drag: float = default_tri_drag,
tri_lift: float = default_tri_lift,
edge_ke: float = default_edge_ke,
edge_kd: float = default_edge_kd,
add_springs: bool = False,
spring_ke: float = default_spring_ke,
spring_kd: float = default_spring_kd,
):
"""Helper to create a regular planar cloth grid
Creates a rectangular grid of particles with FEM triangles and bending elements
automatically.
Args:
pos: The position of the cloth in world space
rot: The orientation of the cloth in world space
vel: The velocity of the cloth in world space
            dim_x: The number of rectangular cells along the x-axis
dim_y: The number of rectangular cells along the y-axis
cell_x: The width of each cell in the x-direction
cell_y: The width of each cell in the y-direction
mass: The mass of each particle
reverse_winding: Flip the winding of the mesh
fix_left: Make the left-most edge of particles kinematic (fixed in place)
fix_right: Make the right-most edge of particles kinematic
fix_top: Make the top-most edge of particles kinematic
fix_bottom: Make the bottom-most edge of particles kinematic
"""
def grid_index(x, y, dim_x):
return y * dim_x + x
start_vertex = len(self.particle_q)
start_tri = len(self.tri_indices)
for y in range(0, dim_y + 1):
for x in range(0, dim_x + 1):
g = wp.vec3(x * cell_x, y * cell_y, 0.0)
p = wp.quat_rotate(rot, g) + pos
m = mass
if x == 0 and fix_left:
m = 0.0
elif x == dim_x and fix_right:
m = 0.0
elif y == 0 and fix_bottom:
m = 0.0
elif y == dim_y and fix_top:
m = 0.0
self.add_particle(p, vel, m)
if x > 0 and y > 0:
if reverse_winding:
tri1 = (
start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1),
)
tri2 = (
start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1),
)
self.add_triangle(*tri1, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
self.add_triangle(*tri2, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
else:
tri1 = (
start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1),
)
tri2 = (
start_vertex + grid_index(x, y - 1, dim_x + 1),
start_vertex + grid_index(x, y, dim_x + 1),
start_vertex + grid_index(x - 1, y, dim_x + 1),
)
self.add_triangle(*tri1, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
self.add_triangle(*tri2, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
end_tri = len(self.tri_indices)
# bending constraints, could create these explicitly for a grid but this
# is a good test of the adjacency structure
adj = wp.utils.MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri)
spring_indices = set()
for _k, e in adj.edges.items():
# skip open edges
if e.f0 == -1 or e.f1 == -1:
continue
self.add_edge(
e.o0, e.o1, e.v0, e.v1, edge_ke=edge_ke, edge_kd=edge_kd
) # opposite 0, opposite 1, vertex 0, vertex 1
spring_indices.add((min(e.o0, e.o1), max(e.o0, e.o1)))
spring_indices.add((min(e.o0, e.v0), max(e.o0, e.v0)))
spring_indices.add((min(e.o0, e.v1), max(e.o0, e.v1)))
spring_indices.add((min(e.o1, e.v0), max(e.o1, e.v0)))
spring_indices.add((min(e.o1, e.v1), max(e.o1, e.v1)))
spring_indices.add((min(e.v0, e.v1), max(e.v0, e.v1)))
if add_springs:
for i, j in spring_indices:
self.add_spring(i, j, spring_ke, spring_kd, control=0.0)
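    # Illustrative usage (a sketch, not from the original source): a 32x32 cloth
    # pinned along its left edge, with extra springs for spring-based solvers.
    #
    #   builder.add_cloth_grid(
    #       pos=wp.vec3(0.0, 2.0, 0.0), rot=wp.quat_identity(), vel=wp.vec3(0.0, 0.0, 0.0),
    #       dim_x=32, dim_y=32, cell_x=0.1, cell_y=0.1, mass=0.1,
    #       fix_left=True, add_springs=True, spring_ke=1.0e3, spring_kd=1.0)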
def add_cloth_mesh(
self,
pos: Vec3,
rot: Quat,
scale: float,
vel: Vec3,
vertices: List[Vec3],
indices: List[int],
density: float,
edge_callback=None,
face_callback=None,
tri_ke: float = default_tri_ke,
tri_ka: float = default_tri_ka,
tri_kd: float = default_tri_kd,
tri_drag: float = default_tri_drag,
tri_lift: float = default_tri_lift,
edge_ke: float = default_edge_ke,
edge_kd: float = default_edge_kd,
add_springs: bool = False,
spring_ke: float = default_spring_ke,
spring_kd: float = default_spring_kd,
):
"""Helper to create a cloth model from a regular triangle mesh
Creates one FEM triangle element and one bending element for every face
and edge in the input triangle mesh
Args:
pos: The position of the cloth in world space
rot: The orientation of the cloth in world space
vel: The velocity of the cloth in world space
vertices: A list of vertex positions
indices: A list of triangle indices, 3 entries per-face
density: The density per-area of the mesh
edge_callback: A user callback when an edge is created
face_callback: A user callback when a face is created
Note:
            The mesh should be two-manifold.
"""
num_tris = int(len(indices) / 3)
start_vertex = len(self.particle_q)
start_tri = len(self.tri_indices)
# particles
for v in vertices:
p = wp.quat_rotate(rot, v * scale) + pos
self.add_particle(p, vel, 0.0)
# triangles
inds = start_vertex + np.array(indices)
inds = inds.reshape(-1, 3)
areas = self.add_triangles(
inds[:, 0],
inds[:, 1],
inds[:, 2],
[tri_ke] * num_tris,
[tri_ka] * num_tris,
[tri_kd] * num_tris,
[tri_drag] * num_tris,
[tri_lift] * num_tris,
)
for t in range(num_tris):
area = areas[t]
self.particle_mass[inds[t, 0]] += density * area / 3.0
self.particle_mass[inds[t, 1]] += density * area / 3.0
self.particle_mass[inds[t, 2]] += density * area / 3.0
end_tri = len(self.tri_indices)
adj = wp.utils.MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri)
edgeinds = np.fromiter(
(x for e in adj.edges.values() if e.f0 != -1 and e.f1 != -1 for x in (e.o0, e.o1, e.v0, e.v1)),
int,
).reshape(-1, 4)
self.add_edges(
edgeinds[:, 0],
edgeinds[:, 1],
edgeinds[:, 2],
            edgeinds[:, 3],
edge_ke=[edge_ke] * len(edgeinds),
edge_kd=[edge_kd] * len(edgeinds),
)
if add_springs:
spring_indices = set()
for i, j, k, l in edgeinds:
spring_indices.add((min(i, j), max(i, j)))
spring_indices.add((min(i, k), max(i, k)))
spring_indices.add((min(i, l), max(i, l)))
spring_indices.add((min(j, k), max(j, k)))
spring_indices.add((min(j, l), max(j, l)))
spring_indices.add((min(k, l), max(k, l)))
for i, j in spring_indices:
self.add_spring(i, j, spring_ke, spring_kd, control=0.0)
def add_particle_grid(
self,
pos: Vec3,
rot: Quat,
vel: Vec3,
dim_x: int,
dim_y: int,
dim_z: int,
cell_x: float,
cell_y: float,
cell_z: float,
mass: float,
jitter: float,
radius_mean: float = default_particle_radius,
radius_std: float = 0.0,
):
rng = np.random.default_rng()
for z in range(dim_z):
for y in range(dim_y):
for x in range(dim_x):
v = wp.vec3(x * cell_x, y * cell_y, z * cell_z)
m = mass
p = wp.quat_rotate(rot, v) + pos + wp.vec3(rng.random(3) * jitter)
if radius_std > 0.0:
r = radius_mean + np.random.randn() * radius_std
else:
r = radius_mean
self.add_particle(p, vel, m, r)
def add_soft_grid(
self,
pos: Vec3,
rot: Quat,
vel: Vec3,
dim_x: int,
dim_y: int,
dim_z: int,
cell_x: float,
cell_y: float,
cell_z: float,
density: float,
k_mu: float,
k_lambda: float,
k_damp: float,
fix_left: bool = False,
fix_right: bool = False,
fix_top: bool = False,
fix_bottom: bool = False,
tri_ke: float = default_tri_ke,
tri_ka: float = default_tri_ka,
tri_kd: float = default_tri_kd,
tri_drag: float = default_tri_drag,
tri_lift: float = default_tri_lift,
):
"""Helper to create a rectangular tetrahedral FEM grid
Creates a regular grid of FEM tetrahedra and surface triangles. Useful for example
to create beams and sheets. Each hexahedral cell is decomposed into 5
tetrahedral elements.
Args:
pos: The position of the solid in world space
rot: The orientation of the solid in world space
vel: The velocity of the solid in world space
            dim_x: The number of rectangular cells along the x-axis
dim_y: The number of rectangular cells along the y-axis
dim_z: The number of rectangular cells along the z-axis
cell_x: The width of each cell in the x-direction
cell_y: The width of each cell in the y-direction
cell_z: The width of each cell in the z-direction
density: The density of each particle
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The damping stiffness
fix_left: Make the left-most edge of particles kinematic (fixed in place)
fix_right: Make the right-most edge of particles kinematic
fix_top: Make the top-most edge of particles kinematic
fix_bottom: Make the bottom-most edge of particles kinematic
"""
start_vertex = len(self.particle_q)
mass = cell_x * cell_y * cell_z * density
for z in range(dim_z + 1):
for y in range(dim_y + 1):
for x in range(dim_x + 1):
v = wp.vec3(x * cell_x, y * cell_y, z * cell_z)
m = mass
if fix_left and x == 0:
m = 0.0
if fix_right and x == dim_x:
m = 0.0
if fix_top and y == dim_y:
m = 0.0
if fix_bottom and y == 0:
m = 0.0
p = wp.quat_rotate(rot, v) + pos
self.add_particle(p, vel, m)
# dict of open faces
faces = {}
def add_face(i: int, j: int, k: int):
key = tuple(sorted((i, j, k)))
if key not in faces:
faces[key] = (i, j, k)
else:
del faces[key]
def add_tet(i: int, j: int, k: int, l: int):
self.add_tetrahedron(i, j, k, l, k_mu, k_lambda, k_damp)
add_face(i, k, j)
add_face(j, k, l)
add_face(i, j, l)
add_face(i, l, k)
def grid_index(x, y, z):
return (dim_x + 1) * (dim_y + 1) * z + (dim_x + 1) * y + x
for z in range(dim_z):
for y in range(dim_y):
for x in range(dim_x):
v0 = grid_index(x, y, z) + start_vertex
v1 = grid_index(x + 1, y, z) + start_vertex
v2 = grid_index(x + 1, y, z + 1) + start_vertex
v3 = grid_index(x, y, z + 1) + start_vertex
v4 = grid_index(x, y + 1, z) + start_vertex
v5 = grid_index(x + 1, y + 1, z) + start_vertex
v6 = grid_index(x + 1, y + 1, z + 1) + start_vertex
v7 = grid_index(x, y + 1, z + 1) + start_vertex
if (x & 1) ^ (y & 1) ^ (z & 1):
add_tet(v0, v1, v4, v3)
add_tet(v2, v3, v6, v1)
add_tet(v5, v4, v1, v6)
add_tet(v7, v6, v3, v4)
add_tet(v4, v1, v6, v3)
else:
add_tet(v1, v2, v5, v0)
add_tet(v3, v0, v7, v2)
add_tet(v4, v7, v0, v5)
add_tet(v6, v5, v2, v7)
add_tet(v5, v2, v7, v0)
# add triangles
for _k, v in faces.items():
self.add_triangle(v[0], v[1], v[2], tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
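    # Illustrative usage (a sketch, not from the original source): an 8x2x2 soft
    # beam fixed at its left face; each particle receives mass cell_x*cell_y*cell_z*density.
    #
    #   builder.add_soft_grid(
    #       pos=wp.vec3(0.0, 0.5, 0.0), rot=wp.quat_identity(), vel=wp.vec3(0.0, 0.0, 0.0),
    #       dim_x=8, dim_y=2, dim_z=2, cell_x=0.1, cell_y=0.1, cell_z=0.1,
    #       density=100.0, k_mu=5.0e4, k_lambda=2.0e4, k_damp=0.0, fix_left=True)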
def add_soft_mesh(
self,
pos: Vec3,
rot: Quat,
scale: float,
vel: Vec3,
vertices: List[Vec3],
indices: List[int],
density: float,
k_mu: float,
k_lambda: float,
k_damp: float,
tri_ke: float = default_tri_ke,
tri_ka: float = default_tri_ka,
tri_kd: float = default_tri_kd,
tri_drag: float = default_tri_drag,
tri_lift: float = default_tri_lift,
):
"""Helper to create a tetrahedral model from an input tetrahedral mesh
Args:
pos: The position of the solid in world space
rot: The orientation of the solid in world space
vel: The velocity of the solid in world space
vertices: A list of vertex positions, array of 3D points
indices: A list of tetrahedron indices, 4 entries per-element, flattened array
            density: The density per-volume of the mesh
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The damping stiffness
"""
num_tets = int(len(indices) / 4)
start_vertex = len(self.particle_q)
# dict of open faces
faces = {}
def add_face(i, j, k):
key = tuple(sorted((i, j, k)))
if key not in faces:
faces[key] = (i, j, k)
else:
del faces[key]
pos = wp.vec3(pos[0], pos[1], pos[2])
# add particles
for v in vertices:
v = wp.vec3(v[0], v[1], v[2])
p = wp.quat_rotate(rot, v * scale) + pos
self.add_particle(p, vel, 0.0)
# add tetrahedra
for t in range(num_tets):
v0 = start_vertex + indices[t * 4 + 0]
v1 = start_vertex + indices[t * 4 + 1]
v2 = start_vertex + indices[t * 4 + 2]
v3 = start_vertex + indices[t * 4 + 3]
volume = self.add_tetrahedron(v0, v1, v2, v3, k_mu, k_lambda, k_damp)
# distribute volume fraction to particles
if volume > 0.0:
self.particle_mass[v0] += density * volume / 4.0
self.particle_mass[v1] += density * volume / 4.0
self.particle_mass[v2] += density * volume / 4.0
self.particle_mass[v3] += density * volume / 4.0
# build open faces
add_face(v0, v2, v1)
add_face(v1, v2, v3)
add_face(v0, v1, v3)
add_face(v0, v3, v2)
# add triangles
for _k, v in faces.items():
try:
self.add_triangle(v[0], v[1], v[2], tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
except np.linalg.LinAlgError:
continue
    # incrementally updates rigid body mass with additional mass and inertia expressed at a point local to the body
def _update_body_mass(self, i, m, I, p, q):
if i == -1:
return
# find new COM
new_mass = self.body_mass[i] + m
if new_mass == 0.0: # no mass
return
new_com = (self.body_com[i] * self.body_mass[i] + p * m) / new_mass
# shift inertia to new COM
com_offset = new_com - self.body_com[i]
shape_offset = new_com - p
new_inertia = transform_inertia(
self.body_mass[i], self.body_inertia[i], com_offset, wp.quat_identity()
) + transform_inertia(m, I, shape_offset, q)
self.body_mass[i] = new_mass
self.body_inertia[i] = new_inertia
self.body_com[i] = new_com
if new_mass > 0.0:
self.body_inv_mass[i] = 1.0 / new_mass
else:
self.body_inv_mass[i] = 0.0
if any(x for x in new_inertia):
self.body_inv_inertia[i] = wp.inverse(new_inertia)
else:
self.body_inv_inertia[i] = new_inertia
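    # Worked example of the update above (illustrative): a body of mass 1 with its
    # COM at the origin, combined with a shape of mass 1 centered at (1, 0, 0),
    # gives new_mass = 2 and new_com = (0.5, 0, 0); both inertia contributions are
    # then shifted by 0.5 along x onto the new COM before being summed.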
def set_ground_plane(
self,
normal=None,
offset=0.0,
ke: float = default_shape_ke,
kd: float = default_shape_kd,
kf: float = default_shape_kf,
mu: float = default_shape_mu,
restitution: float = default_shape_restitution,
):
"""
Creates a ground plane for the world. If the normal is not specified,
the up_vector of the ModelBuilder is used.
"""
if normal is None:
normal = self.up_vector
self._ground_params = {
"plane": (*normal, offset),
"width": 0.0,
"length": 0.0,
"ke": ke,
"kd": kd,
"kf": kf,
"mu": mu,
"restitution": restitution,
}
def _create_ground_plane(self):
ground_id = self.add_shape_plane(**self._ground_params)
self._ground_created = True
# disable ground collisions as they will be treated separately
for i in range(self.shape_count - 1):
self.shape_collision_filter_pairs.add((i, ground_id))
def finalize(self, device=None, requires_grad=False) -> Model:
"""Convert this builder object to a concrete model for simulation.
        After building simulation elements, this method should be called to transfer
all data to device memory ready for simulation.
Args:
device: The simulation device to use, e.g.: 'cpu', 'cuda'
requires_grad: Whether to enable gradient computation for the model
Returns:
A model object.
"""
# ensure the env count is set correctly
self.num_envs = max(1, self.num_envs)
# add ground plane if not already created
if not self._ground_created:
self._create_ground_plane()
# construct particle inv masses
ms = np.array(self.particle_mass, dtype=np.float32)
# static particles (with zero mass) have zero inverse mass
particle_inv_mass = np.divide(1.0, ms, out=np.zeros_like(ms), where=ms != 0.0)
with wp.ScopedDevice(device):
# -------------------------------------
# construct Model (non-time varying) data
m = Model(device)
m.requires_grad = requires_grad
m.ground_plane_params = self._ground_params["plane"]
m.num_envs = self.num_envs
# ---------------------
# particles
# state (initial)
m.particle_q = wp.array(self.particle_q, dtype=wp.vec3, requires_grad=requires_grad)
m.particle_qd = wp.array(self.particle_qd, dtype=wp.vec3, requires_grad=requires_grad)
m.particle_mass = wp.array(self.particle_mass, dtype=wp.float32, requires_grad=requires_grad)
m.particle_inv_mass = wp.array(particle_inv_mass, dtype=wp.float32, requires_grad=requires_grad)
m.particle_radius = wp.array(self.particle_radius, dtype=wp.float32, requires_grad=requires_grad)
m.particle_flags = wp.array([flag_to_int(f) for f in self.particle_flags], dtype=wp.uint32)
m.particle_max_radius = np.max(self.particle_radius) if len(self.particle_radius) > 0 else 0.0
m.particle_max_velocity = self.particle_max_velocity
# hash-grid for particle interactions
m.particle_grid = wp.HashGrid(128, 128, 128)
# ---------------------
# collision geometry
m.shape_transform = wp.array(self.shape_transform, dtype=wp.transform, requires_grad=requires_grad)
m.shape_body = wp.array(self.shape_body, dtype=wp.int32)
m.shape_visible = wp.array(self.shape_visible, dtype=wp.bool)
m.body_shapes = self.body_shapes
# build list of ids for geometry sources (meshes, sdfs)
geo_sources = []
finalized_meshes = {} # do not duplicate meshes
for geo in self.shape_geo_src:
geo_hash = hash(geo) # avoid repeated hash computations
if geo:
if geo_hash not in finalized_meshes:
finalized_meshes[geo_hash] = geo.finalize(device=device)
geo_sources.append(finalized_meshes[geo_hash])
else:
# add null pointer
geo_sources.append(0)
m.shape_geo.type = wp.array(self.shape_geo_type, dtype=wp.int32)
m.shape_geo.source = wp.array(geo_sources, dtype=wp.uint64)
m.shape_geo.scale = wp.array(self.shape_geo_scale, dtype=wp.vec3, requires_grad=requires_grad)
m.shape_geo.is_solid = wp.array(self.shape_geo_is_solid, dtype=wp.uint8)
m.shape_geo.thickness = wp.array(self.shape_geo_thickness, dtype=wp.float32, requires_grad=requires_grad)
m.shape_geo_src = self.shape_geo_src # used for rendering
# store refs to geometry
m.geo_meshes = self.geo_meshes
m.geo_sdfs = self.geo_sdfs
m.shape_materials.ke = wp.array(self.shape_material_ke, dtype=wp.float32, requires_grad=requires_grad)
m.shape_materials.kd = wp.array(self.shape_material_kd, dtype=wp.float32, requires_grad=requires_grad)
m.shape_materials.kf = wp.array(self.shape_material_kf, dtype=wp.float32, requires_grad=requires_grad)
m.shape_materials.ka = wp.array(self.shape_material_ka, dtype=wp.float32, requires_grad=requires_grad)
m.shape_materials.mu = wp.array(self.shape_material_mu, dtype=wp.float32, requires_grad=requires_grad)
m.shape_materials.restitution = wp.array(
self.shape_material_restitution, dtype=wp.float32, requires_grad=requires_grad
)
m.shape_collision_filter_pairs = self.shape_collision_filter_pairs
m.shape_collision_group = self.shape_collision_group
m.shape_collision_group_map = self.shape_collision_group_map
m.shape_collision_radius = wp.array(
self.shape_collision_radius, dtype=wp.float32, requires_grad=requires_grad
)
m.shape_ground_collision = self.shape_ground_collision
m.shape_shape_collision = self.shape_shape_collision
# ---------------------
# springs
m.spring_indices = wp.array(self.spring_indices, dtype=wp.int32)
m.spring_rest_length = wp.array(self.spring_rest_length, dtype=wp.float32, requires_grad=requires_grad)
m.spring_stiffness = wp.array(self.spring_stiffness, dtype=wp.float32, requires_grad=requires_grad)
m.spring_damping = wp.array(self.spring_damping, dtype=wp.float32, requires_grad=requires_grad)
m.spring_control = wp.array(self.spring_control, dtype=wp.float32, requires_grad=requires_grad)
# ---------------------
# triangles
m.tri_indices = wp.array(self.tri_indices, dtype=wp.int32)
m.tri_poses = wp.array(self.tri_poses, dtype=wp.mat22, requires_grad=requires_grad)
m.tri_activations = wp.array(self.tri_activations, dtype=wp.float32, requires_grad=requires_grad)
m.tri_materials = wp.array(self.tri_materials, dtype=wp.float32, requires_grad=requires_grad)
# ---------------------
# edges
m.edge_indices = wp.array(self.edge_indices, dtype=wp.int32)
m.edge_rest_angle = wp.array(self.edge_rest_angle, dtype=wp.float32, requires_grad=requires_grad)
m.edge_bending_properties = wp.array(
self.edge_bending_properties, dtype=wp.float32, requires_grad=requires_grad
)
# ---------------------
# tetrahedra
m.tet_indices = wp.array(self.tet_indices, dtype=wp.int32)
m.tet_poses = wp.array(self.tet_poses, dtype=wp.mat33, requires_grad=requires_grad)
m.tet_activations = wp.array(self.tet_activations, dtype=wp.float32, requires_grad=requires_grad)
m.tet_materials = wp.array(self.tet_materials, dtype=wp.float32, requires_grad=requires_grad)
# -----------------------
# muscles
# close the muscle waypoint indices
muscle_start = copy.copy(self.muscle_start)
muscle_start.append(len(self.muscle_bodies))
m.muscle_start = wp.array(muscle_start, dtype=wp.int32)
m.muscle_params = wp.array(self.muscle_params, dtype=wp.float32, requires_grad=requires_grad)
m.muscle_bodies = wp.array(self.muscle_bodies, dtype=wp.int32)
m.muscle_points = wp.array(self.muscle_points, dtype=wp.vec3, requires_grad=requires_grad)
m.muscle_activations = wp.array(self.muscle_activations, dtype=wp.float32, requires_grad=requires_grad)
# --------------------------------------
# rigid bodies
m.body_q = wp.array(self.body_q, dtype=wp.transform, requires_grad=requires_grad)
m.body_qd = wp.array(self.body_qd, dtype=wp.spatial_vector, requires_grad=requires_grad)
m.body_inertia = wp.array(self.body_inertia, dtype=wp.mat33, requires_grad=requires_grad)
m.body_inv_inertia = wp.array(self.body_inv_inertia, dtype=wp.mat33, requires_grad=requires_grad)
m.body_mass = wp.array(self.body_mass, dtype=wp.float32, requires_grad=requires_grad)
m.body_inv_mass = wp.array(self.body_inv_mass, dtype=wp.float32, requires_grad=requires_grad)
m.body_com = wp.array(self.body_com, dtype=wp.vec3, requires_grad=requires_grad)
m.body_name = self.body_name
# joints
m.joint_type = wp.array(self.joint_type, dtype=wp.int32)
m.joint_parent = wp.array(self.joint_parent, dtype=wp.int32)
m.joint_child = wp.array(self.joint_child, dtype=wp.int32)
m.joint_X_p = wp.array(self.joint_X_p, dtype=wp.transform, requires_grad=requires_grad)
m.joint_X_c = wp.array(self.joint_X_c, dtype=wp.transform, requires_grad=requires_grad)
m.joint_axis_start = wp.array(self.joint_axis_start, dtype=wp.int32)
m.joint_axis_dim = wp.array(np.array(self.joint_axis_dim), dtype=wp.int32, ndim=2)
m.joint_axis = wp.array(self.joint_axis, dtype=wp.vec3, requires_grad=requires_grad)
m.joint_q = wp.array(self.joint_q, dtype=wp.float32, requires_grad=requires_grad)
m.joint_qd = wp.array(self.joint_qd, dtype=wp.float32, requires_grad=requires_grad)
m.joint_name = self.joint_name
# dynamics properties
m.joint_armature = wp.array(self.joint_armature, dtype=wp.float32, requires_grad=requires_grad)
m.joint_target_ke = wp.array(self.joint_target_ke, dtype=wp.float32, requires_grad=requires_grad)
m.joint_target_kd = wp.array(self.joint_target_kd, dtype=wp.float32, requires_grad=requires_grad)
m.joint_axis_mode = wp.array(self.joint_axis_mode, dtype=wp.int32)
m.joint_act = wp.array(self.joint_act, dtype=wp.float32, requires_grad=requires_grad)
m.joint_limit_lower = wp.array(self.joint_limit_lower, dtype=wp.float32, requires_grad=requires_grad)
m.joint_limit_upper = wp.array(self.joint_limit_upper, dtype=wp.float32, requires_grad=requires_grad)
m.joint_limit_ke = wp.array(self.joint_limit_ke, dtype=wp.float32, requires_grad=requires_grad)
m.joint_limit_kd = wp.array(self.joint_limit_kd, dtype=wp.float32, requires_grad=requires_grad)
m.joint_linear_compliance = wp.array(
self.joint_linear_compliance, dtype=wp.float32, requires_grad=requires_grad
)
m.joint_angular_compliance = wp.array(
self.joint_angular_compliance, dtype=wp.float32, requires_grad=requires_grad
)
m.joint_enabled = wp.array(self.joint_enabled, dtype=wp.int32)
# 'close' the start index arrays with a sentinel value
joint_q_start = copy.copy(self.joint_q_start)
joint_q_start.append(self.joint_coord_count)
joint_qd_start = copy.copy(self.joint_qd_start)
joint_qd_start.append(self.joint_dof_count)
articulation_start = copy.copy(self.articulation_start)
articulation_start.append(self.joint_count)
m.joint_q_start = wp.array(joint_q_start, dtype=wp.int32)
m.joint_qd_start = wp.array(joint_qd_start, dtype=wp.int32)
m.articulation_start = wp.array(articulation_start, dtype=wp.int32)
# counts
m.joint_count = self.joint_count
m.joint_axis_count = self.joint_axis_count
m.joint_dof_count = self.joint_dof_count
m.joint_coord_count = self.joint_coord_count
m.particle_count = len(self.particle_q)
m.body_count = len(self.body_q)
m.shape_count = len(self.shape_geo_type)
m.tri_count = len(self.tri_poses)
m.tet_count = len(self.tet_poses)
m.edge_count = len(self.edge_rest_angle)
m.spring_count = len(self.spring_rest_length)
m.muscle_count = len(self.muscle_start)
m.articulation_count = len(self.articulation_start)
# contacts
if m.particle_count:
m.allocate_soft_contacts(self.soft_contact_max, requires_grad=requires_grad)
m.find_shape_contact_pairs()
if self.num_rigid_contacts_per_env is None:
contact_count, limited_contact_count = m.count_contact_points()
else:
contact_count = limited_contact_count = self.num_rigid_contacts_per_env * self.num_envs
if contact_count:
if wp.config.verbose:
print(f"Allocating {contact_count} rigid contacts.")
m.allocate_rigid_contacts(
count=contact_count, limited_contact_count=limited_contact_count, requires_grad=requires_grad
)
m.rigid_mesh_contact_max = self.rigid_mesh_contact_max
m.rigid_contact_margin = self.rigid_contact_margin
m.rigid_contact_torsional_friction = self.rigid_contact_torsional_friction
m.rigid_contact_rolling_friction = self.rigid_contact_rolling_friction
# enable ground plane
m.ground_plane = wp.array(self._ground_params["plane"], dtype=wp.float32, requires_grad=requires_grad)
m.gravity = np.array(self.up_vector) * self.gravity
m.enable_tri_collisions = False
return m
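    # Illustrative end-to-end usage (a sketch, not from the original source; assumes
    # wp.sim.SemiImplicitIntegrator, Model.state() and State.clear_forces() from the
    # surrounding warp.sim package):
    #
    #   model = builder.finalize(device="cuda")
    #   state_0, state_1 = model.state(), model.state()
    #   integrator = wp.sim.SemiImplicitIntegrator()
    #   for _ in range(num_steps):  # num_steps is a placeholder
    #       state_0.clear_forces()
    #       integrator.simulate(model, state_0, state_1, dt=1.0 / 60.0)
    #       state_0, state_1 = state_1, state_0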
| 186,281 | Python | 40.636567 | 260 | 0.585041 |
NVIDIA/warp/warp/sim/integrator_euler.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This module contains time-integration objects for simulating
models + state forward in time.
"""
import warp as wp
from .collide import triangle_closest_point_barycentric
from .integrator import Integrator
from .model import PARTICLE_FLAG_ACTIVE, Control, Model, ModelShapeGeometry, ModelShapeMaterials, State
from .particles import eval_particle_forces
from .utils import quat_decompose, quat_twist
@wp.kernel
def eval_springs(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
spring_indices: wp.array(dtype=int),
spring_rest_lengths: wp.array(dtype=float),
spring_stiffness: wp.array(dtype=float),
spring_damping: wp.array(dtype=float),
f: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
i = spring_indices[tid * 2 + 0]
j = spring_indices[tid * 2 + 1]
ke = spring_stiffness[tid]
kd = spring_damping[tid]
rest = spring_rest_lengths[tid]
xi = x[i]
xj = x[j]
vi = v[i]
vj = v[j]
xij = xi - xj
vij = vi - vj
l = wp.length(xij)
l_inv = 1.0 / l
# normalized spring direction
dir = xij * l_inv
c = l - rest
dcdt = wp.dot(dir, vij)
# damping based on relative velocity
fs = dir * (ke * c + kd * dcdt)
wp.atomic_sub(f, i, fs)
wp.atomic_add(f, j, fs)
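# Sketch of how this kernel might be launched (illustrative; `particle_f` is assumed
# to be a wp.vec3 force array sized to the particle count):
#
#   wp.launch(
#       kernel=eval_springs,
#       dim=model.spring_count,
#       inputs=[state.particle_q, state.particle_qd, model.spring_indices,
#               model.spring_rest_length, model.spring_stiffness, model.spring_damping],
#       outputs=[particle_f],
#       device=model.device,
#   )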
@wp.kernel
def eval_triangles(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
indices: wp.array2d(dtype=int),
pose: wp.array(dtype=wp.mat22),
activation: wp.array(dtype=float),
materials: wp.array2d(dtype=float),
f: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
k_mu = materials[tid, 0]
k_lambda = materials[tid, 1]
k_damp = materials[tid, 2]
k_drag = materials[tid, 3]
k_lift = materials[tid, 4]
i = indices[tid, 0]
j = indices[tid, 1]
k = indices[tid, 2]
x0 = x[i] # point zero
x1 = x[j] # point one
x2 = x[k] # point two
v0 = v[i] # vel zero
v1 = v[j] # vel one
v2 = v[k] # vel two
x10 = x1 - x0 # barycentric coordinates (centered at p)
x20 = x2 - x0
v10 = v1 - v0
v20 = v2 - v0
Dm = pose[tid]
inv_rest_area = wp.determinant(Dm) * 2.0 # 1 / det(A) = det(A^-1)
rest_area = 1.0 / inv_rest_area
# scale stiffness coefficients to account for area
k_mu = k_mu * rest_area
k_lambda = k_lambda * rest_area
k_damp = k_damp * rest_area
# F = Xs*Xm^-1
F1 = x10 * Dm[0, 0] + x20 * Dm[1, 0]
F2 = x10 * Dm[0, 1] + x20 * Dm[1, 1]
# dFdt = Vs*Xm^-1
dFdt1 = v10 * Dm[0, 0] + v20 * Dm[1, 0]
dFdt2 = v10 * Dm[0, 1] + v20 * Dm[1, 1]
# deviatoric PK1 + damping term
P1 = F1 * k_mu + dFdt1 * k_damp
P2 = F2 * k_mu + dFdt2 * k_damp
# -----------------------------
    # St. Venant-Kirchhoff
# # Green strain, F'*F-I
# e00 = dot(f1, f1) - 1.0
# e10 = dot(f2, f1)
# e01 = dot(f1, f2)
# e11 = dot(f2, f2) - 1.0
# E = wp.mat22(e00, e01,
# e10, e11)
# # local forces (deviatoric part)
# T = wp.mul(E, wp.transpose(Dm))
# # spatial forces, F*T
# fq = (f1*T[0,0] + f2*T[1,0])*k_mu*2.0
# fr = (f1*T[0,1] + f2*T[1,1])*k_mu*2.0
# alpha = 1.0
# -----------------------------
# Baraff & Witkin, note this model is not isotropic
# c1 = length(f1) - 1.0
# c2 = length(f2) - 1.0
# f1 = normalize(f1)*c1*k1
# f2 = normalize(f2)*c2*k1
# fq = f1*Dm[0,0] + f2*Dm[0,1]
# fr = f1*Dm[1,0] + f2*Dm[1,1]
# -----------------------------
# Neo-Hookean (with rest stability)
# force = P*Dm'
f1 = P1 * Dm[0, 0] + P2 * Dm[0, 1]
f2 = P1 * Dm[1, 0] + P2 * Dm[1, 1]
alpha = 1.0 + k_mu / k_lambda
# -----------------------------
# Area Preservation
n = wp.cross(x10, x20)
area = wp.length(n) * 0.5
# actuation
act = activation[tid]
# J-alpha
c = area * inv_rest_area - alpha + act
# dJdx
n = wp.normalize(n)
dcdq = wp.cross(x20, n) * inv_rest_area * 0.5
dcdr = wp.cross(n, x10) * inv_rest_area * 0.5
f_area = k_lambda * c
# -----------------------------
# Area Damping
dcdt = wp.dot(dcdq, v1) + wp.dot(dcdr, v2) - wp.dot(dcdq + dcdr, v0)
f_damp = k_damp * dcdt
f1 = f1 + dcdq * (f_area + f_damp)
f2 = f2 + dcdr * (f_area + f_damp)
f0 = f1 + f2
# -----------------------------
# Lift + Drag
vmid = (v0 + v1 + v2) * 0.3333
vdir = wp.normalize(vmid)
f_drag = vmid * (k_drag * area * wp.abs(wp.dot(n, vmid)))
f_lift = n * (k_lift * area * (wp.HALF_PI - wp.acos(wp.dot(n, vdir)))) * wp.dot(vmid, vmid)
f0 = f0 - f_drag - f_lift
f1 = f1 + f_drag + f_lift
f2 = f2 + f_drag + f_lift
# apply forces
wp.atomic_add(f, i, f0)
wp.atomic_sub(f, j, f1)
wp.atomic_sub(f, k, f2)
# @wp.func
# def triangle_closest_point(a: wp.vec3, b: wp.vec3, c: wp.vec3, p: wp.vec3):
# ab = b - a
# ac = c - a
# ap = p - a
# d1 = wp.dot(ab, ap)
# d2 = wp.dot(ac, ap)
# if (d1 <= 0.0 and d2 <= 0.0):
# return a
# bp = p - b
# d3 = wp.dot(ab, bp)
# d4 = wp.dot(ac, bp)
# if (d3 >= 0.0 and d4 <= d3):
# return b
# vc = d1 * d4 - d3 * d2
# v = d1 / (d1 - d3)
# if (vc <= 0.0 and d1 >= 0.0 and d3 <= 0.0):
# return a + ab * v
# cp = p - c
# d5 = dot(ab, cp)
# d6 = dot(ac, cp)
# if (d6 >= 0.0 and d5 <= d6):
# return c
# vb = d5 * d2 - d1 * d6
# w = d2 / (d2 - d6)
# if (vb <= 0.0 and d2 >= 0.0 and d6 <= 0.0):
# return a + ac * w
# va = d3 * d6 - d5 * d4
# w = (d4 - d3) / ((d4 - d3) + (d5 - d6))
# if (va <= 0.0 and (d4 - d3) >= 0.0 and (d5 - d6) >= 0.0):
# return b + (c - b) * w
# denom = 1.0 / (va + vb + vc)
# v = vb * denom
# w = vc * denom
# return a + ab * v + ac * w
@wp.kernel
def eval_triangles_contact(
# idx : wp.array(dtype=int), # list of indices for colliding particles
num_particles: int, # size of particles
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
indices: wp.array2d(dtype=int),
materials: wp.array2d(dtype=float),
f: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
face_no = tid // num_particles # which face
particle_no = tid % num_particles # which particle
# k_mu = materials[face_no, 0]
# k_lambda = materials[face_no, 1]
# k_damp = materials[face_no, 2]
# k_drag = materials[face_no, 3]
# k_lift = materials[face_no, 4]
# at the moment, just one particle
pos = x[particle_no]
i = indices[face_no, 0]
j = indices[face_no, 1]
k = indices[face_no, 2]
if i == particle_no or j == particle_no or k == particle_no:
return
p = x[i] # point zero
q = x[j] # point one
r = x[k] # point two
# vp = v[i] # vel zero
# vq = v[j] # vel one
# vr = v[k] # vel two
# qp = q-p # barycentric coordinates (centered at p)
# rp = r-p
bary = triangle_closest_point_barycentric(p, q, r, pos)
closest = p * bary[0] + q * bary[1] + r * bary[2]
diff = pos - closest
    dist = wp.dot(diff, diff)  # squared distance to the closest point
n = wp.normalize(diff)
c = wp.min(dist - 0.01, 0.0) # 0 unless within 0.01 of surface
# c = wp.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)
fn = n * c * 1e5
wp.atomic_sub(f, particle_no, fn)
# # apply forces (could do - f / 3 here)
wp.atomic_add(f, i, fn * bary[0])
wp.atomic_add(f, j, fn * bary[1])
wp.atomic_add(f, k, fn * bary[2])
@wp.kernel
def eval_triangles_body_contacts(
num_particles: int, # number of particles (size of contact_point)
x: wp.array(dtype=wp.vec3), # position of particles
v: wp.array(dtype=wp.vec3),
indices: wp.array(dtype=int), # triangle indices
    body_x: wp.array(dtype=wp.vec3),  # rigid body positions
body_r: wp.array(dtype=wp.quat),
body_v: wp.array(dtype=wp.vec3),
body_w: wp.array(dtype=wp.vec3),
contact_body: wp.array(dtype=int),
contact_point: wp.array(dtype=wp.vec3), # position of contact points relative to body
contact_dist: wp.array(dtype=float),
contact_mat: wp.array(dtype=int),
materials: wp.array(dtype=float),
# body_f : wp.array(dtype=wp.vec3),
# body_t : wp.array(dtype=wp.vec3),
tri_f: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
face_no = tid // num_particles # which face
particle_no = tid % num_particles # which particle
# -----------------------
    # load rigid body contact point
c_body = contact_body[particle_no]
c_point = contact_point[particle_no]
c_dist = contact_dist[particle_no]
c_mat = contact_mat[particle_no]
# hard coded surface parameter tensor layout (ke, kd, kf, mu)
    ke = materials[c_mat * 4 + 0]  # contact normal stiffness
kd = materials[c_mat * 4 + 1] # damping coefficient
kf = materials[c_mat * 4 + 2] # friction coefficient
mu = materials[c_mat * 4 + 3] # coulomb friction
x0 = body_x[c_body] # position of colliding body
r0 = body_r[c_body] # orientation of colliding body
v0 = body_v[c_body]
w0 = body_w[c_body]
# transform point to world space
pos = x0 + wp.quat_rotate(r0, c_point)
# use x0 as center, everything is offset from center of mass
# moment arm
r = pos - x0 # basically just c_point in the new coordinates
rhat = wp.normalize(r)
pos = pos + rhat * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule
# contact point velocity
    dpdt = v0 + wp.cross(w0, r)  # linear velocity plus angular velocity crossed with the offset, i.e. the velocity of the contact point
# -----------------------
# load triangle
i = indices[face_no * 3 + 0]
j = indices[face_no * 3 + 1]
k = indices[face_no * 3 + 2]
p = x[i] # point zero
q = x[j] # point one
r = x[k] # point two
vp = v[i] # vel zero
vq = v[j] # vel one
vr = v[k] # vel two
bary = triangle_closest_point_barycentric(p, q, r, pos)
closest = p * bary[0] + q * bary[1] + r * bary[2]
diff = pos - closest # vector from tri to point
dist = wp.dot(diff, diff) # squared distance
n = wp.normalize(diff) # points into the object
c = wp.min(dist - 0.05, 0.0) # 0 unless within 0.05 of surface
# c = wp.leaky_min(wp.dot(n, x0)-0.01, 0.0, 0.0)
# fn = n * c * 1e6 # points towards cloth (both n and c are negative)
# wp.atomic_sub(tri_f, particle_no, fn)
fn = c * ke # normal force (restitution coefficient * how far inside for ground) (negative)
vtri = vp * bary[0] + vq * bary[1] + vr * bary[2] # bad approximation for centroid velocity
vrel = vtri - dpdt
vn = wp.dot(n, vrel) # velocity component of body in negative normal direction
vt = vrel - n * vn # velocity component not in normal direction
# contact damping
fd = -wp.max(vn, 0.0) * kd * wp.step(c) # again, negative, into the ground
# # viscous friction
# ft = vt*kf
# Coulomb friction (box)
lower = mu * (fn + fd)
upper = -lower
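    # fn + fd is non-positive here, so lower <= 0 <= upper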
nx = wp.cross(n, wp.vec3(0.0, 0.0, 1.0)) # basis vectors for tangent
nz = wp.cross(n, wp.vec3(1.0, 0.0, 0.0))
vx = wp.clamp(wp.dot(nx * kf, vt), lower, upper)
vz = wp.clamp(wp.dot(nz * kf, vt), lower, upper)
ft = (nx * vx + nz * vz) * (-wp.step(c)) # wp.vec3(vx, 0.0, vz)*wp.step(c)
# # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
# #ft = wp.normalize(vt)*wp.min(kf*wp.length(vt), -mu*c*ke)
f_total = n * (fn + fd) + ft
wp.atomic_add(tri_f, i, f_total * bary[0])
wp.atomic_add(tri_f, j, f_total * bary[1])
wp.atomic_add(tri_f, k, f_total * bary[2])
@wp.kernel
def eval_bending(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
indices: wp.array2d(dtype=int),
rest: wp.array(dtype=float),
bending_properties: wp.array2d(dtype=float),
f: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
ke = bending_properties[tid, 0]
kd = bending_properties[tid, 1]
i = indices[tid, 0]
j = indices[tid, 1]
k = indices[tid, 2]
l = indices[tid, 3]
rest_angle = rest[tid]
x1 = x[i]
x2 = x[j]
x3 = x[k]
x4 = x[l]
v1 = v[i]
v2 = v[j]
v3 = v[k]
v4 = v[l]
n1 = wp.cross(x3 - x1, x4 - x1) # normal to face 1
n2 = wp.cross(x4 - x2, x3 - x2) # normal to face 2
n1_length = wp.length(n1)
n2_length = wp.length(n2)
if n1_length < 1.0e-6 or n2_length < 1.0e-6:
return
rcp_n1 = 1.0 / n1_length
rcp_n2 = 1.0 / n2_length
cos_theta = wp.dot(n1, n2) * rcp_n1 * rcp_n2
n1 = n1 * rcp_n1 * rcp_n1
n2 = n2 * rcp_n2 * rcp_n2
e = x4 - x3
e_hat = wp.normalize(e)
e_length = wp.length(e)
s = wp.sign(wp.dot(wp.cross(n2, n1), e_hat))
angle = wp.acos(cos_theta) * s
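    # d1..d4 are the (scaled) gradients of the dihedral angle with respect to x1..x4,
    # following the standard hinge bending formulation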
d1 = n1 * e_length
d2 = n2 * e_length
d3 = n1 * wp.dot(x1 - x4, e_hat) + n2 * wp.dot(x2 - x4, e_hat)
d4 = n1 * wp.dot(x3 - x1, e_hat) + n2 * wp.dot(x3 - x2, e_hat)
# elastic
f_elastic = ke * (angle - rest_angle)
# damping
f_damp = kd * (wp.dot(d1, v1) + wp.dot(d2, v2) + wp.dot(d3, v3) + wp.dot(d4, v4))
# total force, proportional to edge length
f_total = -e_length * (f_elastic + f_damp)
wp.atomic_add(f, i, d1 * f_total)
wp.atomic_add(f, j, d2 * f_total)
wp.atomic_add(f, k, d3 * f_total)
wp.atomic_add(f, l, d4 * f_total)
@wp.kernel
def eval_tetrahedra(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
indices: wp.array2d(dtype=int),
pose: wp.array(dtype=wp.mat33),
activation: wp.array(dtype=float),
materials: wp.array2d(dtype=float),
f: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
i = indices[tid, 0]
j = indices[tid, 1]
k = indices[tid, 2]
l = indices[tid, 3]
act = activation[tid]
k_mu = materials[tid, 0]
k_lambda = materials[tid, 1]
k_damp = materials[tid, 2]
x0 = x[i]
x1 = x[j]
x2 = x[k]
x3 = x[l]
v0 = v[i]
v1 = v[j]
v2 = v[k]
v3 = v[l]
x10 = x1 - x0
x20 = x2 - x0
x30 = x3 - x0
v10 = v1 - v0
v20 = v2 - v0
v30 = v3 - v0
Ds = wp.mat33(x10, x20, x30)
Dm = pose[tid]
inv_rest_volume = wp.determinant(Dm) * 6.0
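    # for a tetrahedron with rest-state edge matrix A (columns are the rest edge vectors),
    # rest_volume = det(A) / 6 and Dm = A^-1, so 1 / rest_volume = 6 * det(Dm)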
rest_volume = 1.0 / inv_rest_volume
alpha = 1.0 + k_mu / k_lambda - k_mu / (4.0 * k_lambda)
    # scale stiffness coefficients to account for volume
k_mu = k_mu * rest_volume
k_lambda = k_lambda * rest_volume
k_damp = k_damp * rest_volume
# F = Xs*Xm^-1
F = Ds * Dm
dFdt = wp.mat33(v10, v20, v30) * Dm
col1 = wp.vec3(F[0, 0], F[1, 0], F[2, 0])
col2 = wp.vec3(F[0, 1], F[1, 1], F[2, 1])
col3 = wp.vec3(F[0, 2], F[1, 2], F[2, 2])
# -----------------------------
# Neo-Hookean (with rest stability [Smith et al 2018])
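    # Ic = tr(F^T F), the first invariant of the right Cauchy-Green tensor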
Ic = wp.dot(col1, col1) + wp.dot(col2, col2) + wp.dot(col3, col3)
# deviatoric part
P = F * k_mu * (1.0 - 1.0 / (Ic + 1.0)) + dFdt * k_damp
H = P * wp.transpose(Dm)
f1 = wp.vec3(H[0, 0], H[1, 0], H[2, 0])
f2 = wp.vec3(H[0, 1], H[1, 1], H[2, 1])
f3 = wp.vec3(H[0, 2], H[1, 2], H[2, 2])
# -----------------------------
# C_sqrt
# alpha = 1.0
# r_s = wp.sqrt(wp.abs(dot(col1, col1) + dot(col2, col2) + dot(col3, col3) - 3.0))
# f1 = wp.vec3()
# f2 = wp.vec3()
# f3 = wp.vec3()
# if (r_s > 0.0):
# r_s_inv = 1.0/r_s
# C = r_s
# dCdx = F*wp.transpose(Dm)*r_s_inv*wp.sign(r_s)
# grad1 = vec3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = vec3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = vec3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# f1 = grad1*C*k_mu
# f2 = grad2*C*k_mu
# f3 = grad3*C*k_mu
# -----------------------------
# C_spherical
# alpha = 1.0
# r_s = wp.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3))
# r_s_inv = 1.0/r_s
# C = r_s - wp.sqrt(3.0)
# dCdx = F*wp.transpose(Dm)*r_s_inv
# grad1 = vec3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = vec3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = vec3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# f1 = grad1*C*k_mu
# f2 = grad2*C*k_mu
# f3 = grad3*C*k_mu
# ----------------------------
# C_D
# alpha = 1.0
# r_s = wp.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3))
# C = r_s*r_s - 3.0
# dCdx = F*wp.transpose(Dm)*2.0
# grad1 = vec3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
# grad2 = vec3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
# grad3 = vec3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
# f1 = grad1*C*k_mu
# f2 = grad2*C*k_mu
# f3 = grad3*C*k_mu
# ----------------------------
# Hookean
# alpha = 1.0
# I = wp.mat33(wp.vec3(1.0, 0.0, 0.0),
# wp.vec3(0.0, 1.0, 0.0),
# wp.vec3(0.0, 0.0, 1.0))
# P = (F + wp.transpose(F) + I*(0.0-2.0))*k_mu
# H = P * wp.transpose(Dm)
# f1 = wp.vec3(H[0, 0], H[1, 0], H[2, 0])
# f2 = wp.vec3(H[0, 1], H[1, 1], H[2, 1])
# f3 = wp.vec3(H[0, 2], H[1, 2], H[2, 2])
# hydrostatic part
J = wp.determinant(F)
# print(J)
s = inv_rest_volume / 6.0
dJdx1 = wp.cross(x20, x30) * s
dJdx2 = wp.cross(x30, x10) * s
dJdx3 = wp.cross(x10, x20) * s
f_volume = (J - alpha + act) * k_lambda
f_damp = (wp.dot(dJdx1, v1) + wp.dot(dJdx2, v2) + wp.dot(dJdx3, v3)) * k_damp
f_total = f_volume + f_damp
f1 = f1 + dJdx1 * f_total
f2 = f2 + dJdx2 * f_total
f3 = f3 + dJdx3 * f_total
f0 = -(f1 + f2 + f3)
# apply forces
wp.atomic_sub(f, i, f0)
wp.atomic_sub(f, j, f1)
wp.atomic_sub(f, k, f2)
wp.atomic_sub(f, l, f3)
@wp.kernel
def eval_particle_ground_contacts(
particle_x: wp.array(dtype=wp.vec3),
particle_v: wp.array(dtype=wp.vec3),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
ke: float,
kd: float,
kf: float,
mu: float,
ground: wp.array(dtype=float),
# outputs
f: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
if (particle_flags[tid] & PARTICLE_FLAG_ACTIVE) == 0:
return
x = particle_x[tid]
v = particle_v[tid]
radius = particle_radius[tid]
n = wp.vec3(ground[0], ground[1], ground[2])
c = wp.min(wp.dot(n, x) + ground[3] - radius, 0.0)
vn = wp.dot(n, v)
jn = c * ke
if c >= 0.0:
return
jd = min(vn, 0.0) * kd
# contact force
fn = jn + jd
# friction force
vt = v - n * vn
vs = wp.length(vt)
if vs > 0.0:
vt = vt / vs
# Coulomb condition
ft = wp.min(vs * kf, mu * wp.abs(fn))
# total force
f[tid] = f[tid] - n * fn - vt * ft
@wp.kernel
def eval_particle_contacts(
particle_x: wp.array(dtype=wp.vec3),
particle_v: wp.array(dtype=wp.vec3),
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
particle_radius: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
body_com: wp.array(dtype=wp.vec3),
shape_body: wp.array(dtype=int),
shape_materials: ModelShapeMaterials,
particle_ke: float,
particle_kd: float,
particle_kf: float,
particle_mu: float,
particle_ka: float,
contact_count: wp.array(dtype=int),
contact_particle: wp.array(dtype=int),
contact_shape: wp.array(dtype=int),
contact_body_pos: wp.array(dtype=wp.vec3),
contact_body_vel: wp.array(dtype=wp.vec3),
contact_normal: wp.array(dtype=wp.vec3),
contact_max: int,
# outputs
particle_f: wp.array(dtype=wp.vec3),
body_f: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
count = min(contact_max, contact_count[0])
if tid >= count:
return
shape_index = contact_shape[tid]
body_index = shape_body[shape_index]
particle_index = contact_particle[tid]
if (particle_flags[particle_index] & PARTICLE_FLAG_ACTIVE) == 0:
return
px = particle_x[particle_index]
pv = particle_v[particle_index]
X_wb = wp.transform_identity()
X_com = wp.vec3()
body_v_s = wp.spatial_vector()
if body_index >= 0:
X_wb = body_q[body_index]
X_com = body_com[body_index]
body_v_s = body_qd[body_index]
# body position in world space
bx = wp.transform_point(X_wb, contact_body_pos[tid])
r = bx - wp.transform_point(X_wb, X_com)
n = contact_normal[tid]
c = wp.dot(n, px - bx) - particle_radius[tid]
if c > particle_ka:
return
# take average material properties of shape and particle parameters
ke = 0.5 * (particle_ke + shape_materials.ke[shape_index])
kd = 0.5 * (particle_kd + shape_materials.kd[shape_index])
kf = 0.5 * (particle_kf + shape_materials.kf[shape_index])
mu = 0.5 * (particle_mu + shape_materials.mu[shape_index])
body_w = wp.spatial_top(body_v_s)
body_v = wp.spatial_bottom(body_v_s)
# compute the body velocity at the particle position
bv = body_v + wp.cross(body_w, r) + wp.transform_vector(X_wb, contact_body_vel[tid])
# relative velocity
v = pv - bv
# decompose relative velocity
vn = wp.dot(n, v)
vt = v - n * vn
# contact elastic
fn = n * c * ke
# contact damping
fd = n * wp.min(vn, 0.0) * kd
# viscous friction
# ft = vt*kf
# Coulomb friction (box)
# lower = mu * c * ke
# upper = -lower
# vx = wp.clamp(wp.dot(wp.vec3(kf, 0.0, 0.0), vt), lower, upper)
# vz = wp.clamp(wp.dot(wp.vec3(0.0, 0.0, kf), vt), lower, upper)
# ft = wp.vec3(vx, 0.0, vz)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
ft = wp.normalize(vt) * wp.min(kf * wp.length(vt), abs(mu * c * ke))
f_total = fn + (fd + ft)
t_total = wp.cross(r, f_total)
wp.atomic_sub(particle_f, particle_index, f_total)
if body_index >= 0:
wp.atomic_add(body_f, body_index, wp.spatial_vector(t_total, f_total))
@wp.kernel
def eval_rigid_contacts(
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
shape_materials: ModelShapeMaterials,
geo: ModelShapeGeometry,
shape_body: wp.array(dtype=int),
contact_count: wp.array(dtype=int),
contact_point0: wp.array(dtype=wp.vec3),
contact_point1: wp.array(dtype=wp.vec3),
contact_normal: wp.array(dtype=wp.vec3),
contact_shape0: wp.array(dtype=int),
contact_shape1: wp.array(dtype=int),
force_in_world_frame: bool,
# outputs
body_f: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
count = contact_count[0]
if tid >= count:
return
# retrieve contact thickness, compute average contact material properties
ke = 0.0 # contact normal force stiffness
kd = 0.0 # damping coefficient
kf = 0.0 # friction force stiffness
ka = 0.0 # adhesion distance
mu = 0.0 # friction coefficient
mat_nonzero = 0
thickness_a = 0.0
thickness_b = 0.0
shape_a = contact_shape0[tid]
shape_b = contact_shape1[tid]
if shape_a == shape_b:
return
body_a = -1
body_b = -1
if shape_a >= 0:
mat_nonzero += 1
ke += shape_materials.ke[shape_a]
kd += shape_materials.kd[shape_a]
kf += shape_materials.kf[shape_a]
ka += shape_materials.ka[shape_a]
mu += shape_materials.mu[shape_a]
thickness_a = geo.thickness[shape_a]
body_a = shape_body[shape_a]
if shape_b >= 0:
mat_nonzero += 1
ke += shape_materials.ke[shape_b]
kd += shape_materials.kd[shape_b]
kf += shape_materials.kf[shape_b]
ka += shape_materials.ka[shape_b]
mu += shape_materials.mu[shape_b]
thickness_b = geo.thickness[shape_b]
body_b = shape_body[shape_b]
if mat_nonzero > 0:
ke /= float(mat_nonzero)
kd /= float(mat_nonzero)
kf /= float(mat_nonzero)
ka /= float(mat_nonzero)
mu /= float(mat_nonzero)
# contact normal in world space
n = contact_normal[tid]
bx_a = contact_point0[tid]
bx_b = contact_point1[tid]
if body_a >= 0:
X_wb_a = body_q[body_a]
X_com_a = body_com[body_a]
bx_a = wp.transform_point(X_wb_a, bx_a) - thickness_a * n
r_a = bx_a - wp.transform_point(X_wb_a, X_com_a)
if body_b >= 0:
X_wb_b = body_q[body_b]
X_com_b = body_com[body_b]
bx_b = wp.transform_point(X_wb_b, bx_b) + thickness_b * n
r_b = bx_b - wp.transform_point(X_wb_b, X_com_b)
d = wp.dot(n, bx_a - bx_b)
if d >= ka:
return
# compute contact point velocity
bv_a = wp.vec3(0.0)
bv_b = wp.vec3(0.0)
if body_a >= 0:
body_v_s_a = body_qd[body_a]
body_w_a = wp.spatial_top(body_v_s_a)
body_v_a = wp.spatial_bottom(body_v_s_a)
if force_in_world_frame:
bv_a = body_v_a + wp.cross(body_w_a, bx_a)
else:
bv_a = body_v_a + wp.cross(body_w_a, r_a)
if body_b >= 0:
body_v_s_b = body_qd[body_b]
body_w_b = wp.spatial_top(body_v_s_b)
body_v_b = wp.spatial_bottom(body_v_s_b)
if force_in_world_frame:
bv_b = body_v_b + wp.cross(body_w_b, bx_b)
else:
bv_b = body_v_b + wp.cross(body_w_b, r_b)
# relative velocity
v = bv_a - bv_b
# print(v)
# decompose relative velocity
vn = wp.dot(n, v)
vt = v - n * vn
# contact elastic
fn = d * ke
# contact damping
fd = wp.min(vn, 0.0) * kd * wp.step(d)
# viscous friction
# ft = vt*kf
# Coulomb friction (box)
# lower = mu * d * ke
# upper = -lower
# vx = wp.clamp(wp.dot(wp.vec3(kf, 0.0, 0.0), vt), lower, upper)
# vz = wp.clamp(wp.dot(wp.vec3(0.0, 0.0, kf), vt), lower, upper)
# ft = wp.vec3(vx, 0.0, vz)
# Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
# ft = wp.normalize(vt)*wp.min(kf*wp.length(vt), abs(mu*d*ke))
ft = wp.vec3(0.0)
if d < 0.0:
ft = wp.normalize(vt) * wp.min(kf * wp.length(vt), -mu * (fn + fd))
f_total = n * (fn + fd) + ft
# f_total = n * fn
if body_a >= 0:
if force_in_world_frame:
wp.atomic_add(body_f, body_a, wp.spatial_vector(wp.cross(bx_a, f_total), f_total))
else:
wp.atomic_sub(body_f, body_a, wp.spatial_vector(wp.cross(r_a, f_total), f_total))
if body_b >= 0:
if force_in_world_frame:
wp.atomic_sub(body_f, body_b, wp.spatial_vector(wp.cross(bx_b, f_total), f_total))
else:
wp.atomic_add(body_f, body_b, wp.spatial_vector(wp.cross(r_b, f_total), f_total))
@wp.func
def eval_joint_force(
q: float,
qd: float,
act: float,
target_ke: float,
target_kd: float,
limit_lower: float,
limit_upper: float,
limit_ke: float,
limit_kd: float,
mode: wp.int32,
) -> float:
"""Joint force evaluation for a single degree of freedom."""
limit_f = 0.0
damping_f = 0.0
target_f = 0.0
if mode == wp.sim.JOINT_MODE_FORCE:
target_f = act
elif mode == wp.sim.JOINT_MODE_TARGET_POSITION:
target_f = target_ke * (act - q) - target_kd * qd
elif mode == wp.sim.JOINT_MODE_TARGET_VELOCITY:
target_f = target_ke * (act - qd)
# compute limit forces, damping only active when limit is violated
if q < limit_lower:
limit_f = limit_ke * (limit_lower - q)
damping_f = -limit_kd * qd
if mode == wp.sim.JOINT_MODE_TARGET_VELOCITY:
target_f = 0.0 # override target force when limit is violated
elif q > limit_upper:
limit_f = limit_ke * (limit_upper - q)
damping_f = -limit_kd * qd
if mode == wp.sim.JOINT_MODE_TARGET_VELOCITY:
target_f = 0.0 # override target force when limit is violated
return limit_f + damping_f + target_f
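# Worked example for eval_joint_force above (illustrative values, not taken from any
# particular model): in JOINT_MODE_TARGET_POSITION with q=0.2, qd=0.0, act=0.0,
# target_ke=100.0, target_kd=1.0 and the limits inactive, the returned force is
# target_ke * (act - q) = -20.0.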
@wp.kernel
def eval_body_joints(
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
joint_qd_start: wp.array(dtype=int),
joint_type: wp.array(dtype=int),
joint_enabled: wp.array(dtype=int),
joint_child: wp.array(dtype=int),
joint_parent: wp.array(dtype=int),
joint_X_p: wp.array(dtype=wp.transform),
joint_X_c: wp.array(dtype=wp.transform),
joint_axis: wp.array(dtype=wp.vec3),
joint_axis_start: wp.array(dtype=int),
joint_axis_dim: wp.array(dtype=int, ndim=2),
joint_axis_mode: wp.array(dtype=int),
joint_act: wp.array(dtype=float),
joint_target_ke: wp.array(dtype=float),
joint_target_kd: wp.array(dtype=float),
joint_limit_lower: wp.array(dtype=float),
joint_limit_upper: wp.array(dtype=float),
joint_limit_ke: wp.array(dtype=float),
joint_limit_kd: wp.array(dtype=float),
joint_attach_ke: float,
joint_attach_kd: float,
body_f: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
type = joint_type[tid]
    # early out for disabled or free joints
if joint_enabled[tid] == 0 or type == wp.sim.JOINT_FREE:
return
c_child = joint_child[tid]
c_parent = joint_parent[tid]
X_pj = joint_X_p[tid]
X_cj = joint_X_c[tid]
X_wp = X_pj
r_p = wp.vec3()
w_p = wp.vec3()
v_p = wp.vec3()
# parent transform and moment arm
if c_parent >= 0:
X_wp = body_q[c_parent] * X_wp
r_p = wp.transform_get_translation(X_wp) - wp.transform_point(body_q[c_parent], body_com[c_parent])
twist_p = body_qd[c_parent]
w_p = wp.spatial_top(twist_p)
v_p = wp.spatial_bottom(twist_p) + wp.cross(w_p, r_p)
# child transform and moment arm
X_wc = body_q[c_child] * X_cj
r_c = wp.transform_get_translation(X_wc) - wp.transform_point(body_q[c_child], body_com[c_child])
twist_c = body_qd[c_child]
w_c = wp.spatial_top(twist_c)
v_c = wp.spatial_bottom(twist_c) + wp.cross(w_c, r_c)
# joint properties (for 1D joints)
# q_start = joint_q_start[tid]
# qd_start = joint_qd_start[tid]
axis_start = joint_axis_start[tid]
lin_axis_count = joint_axis_dim[tid, 0]
ang_axis_count = joint_axis_dim[tid, 1]
x_p = wp.transform_get_translation(X_wp)
x_c = wp.transform_get_translation(X_wc)
q_p = wp.transform_get_rotation(X_wp)
q_c = wp.transform_get_rotation(X_wc)
# translational error
x_err = x_c - x_p
r_err = wp.quat_inverse(q_p) * q_c
v_err = v_c - v_p
w_err = w_c - w_p
# total force/torque on the parent
t_total = wp.vec3()
f_total = wp.vec3()
# reduce angular damping stiffness for stability
angular_damping_scale = 0.01
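    # note: r_err is a quaternion (x, y, z, w); the branches below convert it to an
    # axis-angle error vector with axis = normalize(x, y, z) and angle = 2 * acos(w)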
if type == wp.sim.JOINT_FIXED:
ang_err = wp.normalize(wp.vec3(r_err[0], r_err[1], r_err[2])) * wp.acos(r_err[3]) * 2.0
f_total += x_err * joint_attach_ke + v_err * joint_attach_kd
t_total += (
wp.transform_vector(X_wp, ang_err) * joint_attach_ke + w_err * joint_attach_kd * angular_damping_scale
)
if type == wp.sim.JOINT_PRISMATIC:
axis = joint_axis[axis_start]
# world space joint axis
axis_p = wp.transform_vector(X_wp, axis)
# evaluate joint coordinates
q = wp.dot(x_err, axis_p)
qd = wp.dot(v_err, axis_p)
act = joint_act[axis_start]
f_total = axis_p * -eval_joint_force(
q,
qd,
act,
joint_target_ke[axis_start],
joint_target_kd[axis_start],
joint_limit_lower[axis_start],
joint_limit_upper[axis_start],
joint_limit_ke[axis_start],
joint_limit_kd[axis_start],
joint_axis_mode[axis_start],
)
# attachment dynamics
ang_err = wp.normalize(wp.vec3(r_err[0], r_err[1], r_err[2])) * wp.acos(r_err[3]) * 2.0
# project off any displacement along the joint axis
f_total += (x_err - q * axis_p) * joint_attach_ke + (v_err - qd * axis_p) * joint_attach_kd
t_total += (
wp.transform_vector(X_wp, ang_err) * joint_attach_ke + w_err * joint_attach_kd * angular_damping_scale
)
if type == wp.sim.JOINT_REVOLUTE:
axis = joint_axis[axis_start]
axis_p = wp.transform_vector(X_wp, axis)
axis_c = wp.transform_vector(X_wc, axis)
# swing twist decomposition
twist = quat_twist(axis, r_err)
q = wp.acos(twist[3]) * 2.0 * wp.sign(wp.dot(axis, wp.vec3(twist[0], twist[1], twist[2])))
qd = wp.dot(w_err, axis_p)
act = joint_act[axis_start]
t_total = axis_p * -eval_joint_force(
q,
qd,
act,
joint_target_ke[axis_start],
joint_target_kd[axis_start],
joint_limit_lower[axis_start],
joint_limit_upper[axis_start],
joint_limit_ke[axis_start],
joint_limit_kd[axis_start],
joint_axis_mode[axis_start],
)
# attachment dynamics
swing_err = wp.cross(axis_p, axis_c)
f_total += x_err * joint_attach_ke + v_err * joint_attach_kd
t_total += swing_err * joint_attach_ke + (w_err - qd * axis_p) * joint_attach_kd * angular_damping_scale
if type == wp.sim.JOINT_BALL:
ang_err = wp.normalize(wp.vec3(r_err[0], r_err[1], r_err[2])) * wp.acos(r_err[3]) * 2.0
# TODO joint limits
# TODO expose target_kd or target_ke for ball joints
# t_total += target_kd * w_err + target_ke * wp.transform_vector(X_wp, ang_err)
f_total += x_err * joint_attach_ke + v_err * joint_attach_kd
if type == wp.sim.JOINT_COMPOUND:
q_pc = wp.quat_inverse(q_p) * q_c
# decompose to a compound rotation each axis
angles = quat_decompose(q_pc)
# reconstruct rotation axes
axis_0 = wp.vec3(1.0, 0.0, 0.0)
q_0 = wp.quat_from_axis_angle(axis_0, angles[0])
axis_1 = wp.quat_rotate(q_0, wp.vec3(0.0, 1.0, 0.0))
q_1 = wp.quat_from_axis_angle(axis_1, angles[1])
axis_2 = wp.quat_rotate(q_1 * q_0, wp.vec3(0.0, 0.0, 1.0))
# q_2 = wp.quat_from_axis_angle(axis_2, angles[2])
# q_w = q_p
axis_0 = wp.transform_vector(X_wp, axis_0)
axis_1 = wp.transform_vector(X_wp, axis_1)
axis_2 = wp.transform_vector(X_wp, axis_2)
# joint dynamics
# # TODO remove wp.quat_rotate(q_w, ...)?
# t_total += eval_joint_force(angles[0], wp.dot(wp.quat_rotate(q_w, axis_0), w_err), joint_target[axis_start+0], joint_target_ke[axis_start+0],joint_target_kd[axis_start+0], joint_act[axis_start+0], joint_limit_lower[axis_start+0], joint_limit_upper[axis_start+0], joint_limit_ke[axis_start+0], joint_limit_kd[axis_start+0], wp.quat_rotate(q_w, axis_0))
# t_total += eval_joint_force(angles[1], wp.dot(wp.quat_rotate(q_w, axis_1), w_err), joint_target[axis_start+1], joint_target_ke[axis_start+1],joint_target_kd[axis_start+1], joint_act[axis_start+1], joint_limit_lower[axis_start+1], joint_limit_upper[axis_start+1], joint_limit_ke[axis_start+1], joint_limit_kd[axis_start+1], wp.quat_rotate(q_w, axis_1))
# t_total += eval_joint_force(angles[2], wp.dot(wp.quat_rotate(q_w, axis_2), w_err), joint_target[axis_start+2], joint_target_ke[axis_start+2],joint_target_kd[axis_start+2], joint_act[axis_start+2], joint_limit_lower[axis_start+2], joint_limit_upper[axis_start+2], joint_limit_ke[axis_start+2], joint_limit_kd[axis_start+2], wp.quat_rotate(q_w, axis_2))
t_total += axis_0 * -eval_joint_force(
angles[0],
wp.dot(axis_0, w_err),
joint_act[axis_start + 0],
joint_target_ke[axis_start + 0],
joint_target_kd[axis_start + 0],
joint_limit_lower[axis_start + 0],
joint_limit_upper[axis_start + 0],
joint_limit_ke[axis_start + 0],
joint_limit_kd[axis_start + 0],
joint_axis_mode[axis_start + 0],
)
t_total += axis_1 * -eval_joint_force(
angles[1],
wp.dot(axis_1, w_err),
joint_act[axis_start + 1],
joint_target_ke[axis_start + 1],
joint_target_kd[axis_start + 1],
joint_limit_lower[axis_start + 1],
joint_limit_upper[axis_start + 1],
joint_limit_ke[axis_start + 1],
joint_limit_kd[axis_start + 1],
joint_axis_mode[axis_start + 1],
)
t_total += axis_2 * -eval_joint_force(
angles[2],
wp.dot(axis_2, w_err),
joint_act[axis_start + 2],
joint_target_ke[axis_start + 2],
joint_target_kd[axis_start + 2],
joint_limit_lower[axis_start + 2],
joint_limit_upper[axis_start + 2],
joint_limit_ke[axis_start + 2],
joint_limit_kd[axis_start + 2],
joint_axis_mode[axis_start + 2],
)
f_total += x_err * joint_attach_ke + v_err * joint_attach_kd
if type == wp.sim.JOINT_UNIVERSAL:
q_pc = wp.quat_inverse(q_p) * q_c
# decompose to a compound rotation each axis
angles = quat_decompose(q_pc)
# reconstruct rotation axes
axis_0 = wp.vec3(1.0, 0.0, 0.0)
q_0 = wp.quat_from_axis_angle(axis_0, angles[0])
axis_1 = wp.quat_rotate(q_0, wp.vec3(0.0, 1.0, 0.0))
q_1 = wp.quat_from_axis_angle(axis_1, angles[1])
axis_2 = wp.quat_rotate(q_1 * q_0, wp.vec3(0.0, 0.0, 1.0))
axis_0 = wp.transform_vector(X_wp, axis_0)
axis_1 = wp.transform_vector(X_wp, axis_1)
axis_2 = wp.transform_vector(X_wp, axis_2)
# joint dynamics
t_total += axis_0 * -eval_joint_force(
angles[0],
wp.dot(axis_0, w_err),
joint_act[axis_start + 0],
joint_target_ke[axis_start + 0],
joint_target_kd[axis_start + 0],
joint_limit_lower[axis_start + 0],
joint_limit_upper[axis_start + 0],
joint_limit_ke[axis_start + 0],
joint_limit_kd[axis_start + 0],
joint_axis_mode[axis_start + 0],
)
t_total += axis_1 * -eval_joint_force(
angles[1],
wp.dot(axis_1, w_err),
joint_act[axis_start + 1],
joint_target_ke[axis_start + 1],
joint_target_kd[axis_start + 1],
joint_limit_lower[axis_start + 1],
joint_limit_upper[axis_start + 1],
joint_limit_ke[axis_start + 1],
joint_limit_kd[axis_start + 1],
joint_axis_mode[axis_start + 1],
)
# last axis (fixed)
t_total += axis_2 * -eval_joint_force(
angles[2],
wp.dot(axis_2, w_err),
0.0,
joint_attach_ke,
joint_attach_kd * angular_damping_scale,
0.0,
0.0,
0.0,
0.0,
wp.sim.JOINT_MODE_FORCE,
)
f_total += x_err * joint_attach_ke + v_err * joint_attach_kd
if type == wp.sim.JOINT_D6:
pos = wp.vec3(0.0)
vel = wp.vec3(0.0)
if lin_axis_count >= 1:
axis_0 = wp.transform_vector(X_wp, joint_axis[axis_start + 0])
q0 = wp.dot(x_err, axis_0)
qd0 = wp.dot(v_err, axis_0)
f_total += axis_0 * -eval_joint_force(
q0,
qd0,
joint_act[axis_start + 0],
joint_target_ke[axis_start + 0],
joint_target_kd[axis_start + 0],
joint_limit_lower[axis_start + 0],
joint_limit_upper[axis_start + 0],
joint_limit_ke[axis_start + 0],
joint_limit_kd[axis_start + 0],
joint_axis_mode[axis_start + 0],
)
pos += q0 * axis_0
vel += qd0 * axis_0
if lin_axis_count >= 2:
axis_1 = wp.transform_vector(X_wp, joint_axis[axis_start + 1])
q1 = wp.dot(x_err, axis_1)
qd1 = wp.dot(v_err, axis_1)
f_total += axis_1 * -eval_joint_force(
q1,
qd1,
joint_act[axis_start + 1],
joint_target_ke[axis_start + 1],
joint_target_kd[axis_start + 1],
joint_limit_lower[axis_start + 1],
joint_limit_upper[axis_start + 1],
joint_limit_ke[axis_start + 1],
joint_limit_kd[axis_start + 1],
joint_axis_mode[axis_start + 1],
)
pos += q1 * axis_1
vel += qd1 * axis_1
if lin_axis_count == 3:
axis_2 = wp.transform_vector(X_wp, joint_axis[axis_start + 2])
q2 = wp.dot(x_err, axis_2)
qd2 = wp.dot(v_err, axis_2)
f_total += axis_2 * -eval_joint_force(
q2,
qd2,
joint_act[axis_start + 2],
joint_target_ke[axis_start + 2],
joint_target_kd[axis_start + 2],
joint_limit_lower[axis_start + 2],
joint_limit_upper[axis_start + 2],
joint_limit_ke[axis_start + 2],
joint_limit_kd[axis_start + 2],
joint_axis_mode[axis_start + 2],
)
pos += q2 * axis_2
vel += qd2 * axis_2
f_total += (x_err - pos) * joint_attach_ke + (v_err - vel) * joint_attach_kd
if ang_axis_count == 0:
ang_err = wp.normalize(wp.vec3(r_err[0], r_err[1], r_err[2])) * wp.acos(r_err[3]) * 2.0
t_total += (
wp.transform_vector(X_wp, ang_err) * joint_attach_ke + w_err * joint_attach_kd * angular_damping_scale
)
i_0 = lin_axis_count + axis_start + 0
i_1 = lin_axis_count + axis_start + 1
i_2 = lin_axis_count + axis_start + 2
if ang_axis_count == 1:
axis = joint_axis[i_0]
axis_p = wp.transform_vector(X_wp, axis)
axis_c = wp.transform_vector(X_wc, axis)
# swing twist decomposition
twist = quat_twist(axis, r_err)
q = wp.acos(twist[3]) * 2.0 * wp.sign(wp.dot(axis, wp.vec3(twist[0], twist[1], twist[2])))
qd = wp.dot(w_err, axis_p)
t_total = axis_p * -eval_joint_force(
q,
qd,
joint_act[i_0],
joint_target_ke[i_0],
joint_target_kd[i_0],
joint_limit_lower[i_0],
joint_limit_upper[i_0],
joint_limit_ke[i_0],
joint_limit_kd[i_0],
joint_axis_mode[i_0],
)
# attachment dynamics
swing_err = wp.cross(axis_p, axis_c)
t_total += swing_err * joint_attach_ke + (w_err - qd * axis_p) * joint_attach_kd * angular_damping_scale
if ang_axis_count == 2:
q_pc = wp.quat_inverse(q_p) * q_c
# decompose to a compound rotation each axis
angles = quat_decompose(q_pc)
orig_axis_0 = joint_axis[i_0]
orig_axis_1 = joint_axis[i_1]
orig_axis_2 = wp.cross(orig_axis_0, orig_axis_1)
# reconstruct rotation axes
axis_0 = orig_axis_0
q_0 = wp.quat_from_axis_angle(axis_0, angles[0])
axis_1 = wp.quat_rotate(q_0, orig_axis_1)
q_1 = wp.quat_from_axis_angle(axis_1, angles[1])
axis_2 = wp.quat_rotate(q_1 * q_0, orig_axis_2)
axis_0 = wp.transform_vector(X_wp, axis_0)
axis_1 = wp.transform_vector(X_wp, axis_1)
axis_2 = wp.transform_vector(X_wp, axis_2)
# joint dynamics
t_total += axis_0 * -eval_joint_force(
angles[0],
wp.dot(axis_0, w_err),
joint_act[i_0],
joint_target_ke[i_0],
joint_target_kd[i_0],
joint_limit_lower[i_0],
joint_limit_upper[i_0],
joint_limit_ke[i_0],
joint_limit_kd[i_0],
joint_axis_mode[i_0],
)
t_total += axis_1 * -eval_joint_force(
angles[1],
wp.dot(axis_1, w_err),
joint_act[i_1],
joint_target_ke[i_1],
joint_target_kd[i_1],
joint_limit_lower[i_1],
joint_limit_upper[i_1],
joint_limit_ke[i_1],
joint_limit_kd[i_1],
joint_axis_mode[i_1],
)
# last axis (fixed)
t_total += axis_2 * -eval_joint_force(
angles[2],
wp.dot(axis_2, w_err),
0.0,
joint_attach_ke,
joint_attach_kd * angular_damping_scale,
0.0,
0.0,
0.0,
0.0,
wp.sim.JOINT_MODE_FORCE,
)
if ang_axis_count == 3:
q_pc = wp.quat_inverse(q_p) * q_c
# decompose to a compound rotation each axis
angles = quat_decompose(q_pc)
orig_axis_0 = joint_axis[i_0]
orig_axis_1 = joint_axis[i_1]
orig_axis_2 = joint_axis[i_2]
# reconstruct rotation axes
axis_0 = orig_axis_0
q_0 = wp.quat_from_axis_angle(axis_0, angles[0])
axis_1 = wp.quat_rotate(q_0, orig_axis_1)
q_1 = wp.quat_from_axis_angle(axis_1, angles[1])
axis_2 = wp.quat_rotate(q_1 * q_0, orig_axis_2)
axis_0 = wp.transform_vector(X_wp, axis_0)
axis_1 = wp.transform_vector(X_wp, axis_1)
axis_2 = wp.transform_vector(X_wp, axis_2)
t_total += axis_0 * -eval_joint_force(
angles[0],
wp.dot(axis_0, w_err),
joint_act[i_0],
joint_target_ke[i_0],
joint_target_kd[i_0],
joint_limit_lower[i_0],
joint_limit_upper[i_0],
joint_limit_ke[i_0],
joint_limit_kd[i_0],
joint_axis_mode[i_0],
)
t_total += axis_1 * -eval_joint_force(
angles[1],
wp.dot(axis_1, w_err),
joint_act[i_1],
joint_target_ke[i_1],
joint_target_kd[i_1],
joint_limit_lower[i_1],
joint_limit_upper[i_1],
joint_limit_ke[i_1],
joint_limit_kd[i_1],
joint_axis_mode[i_1],
)
t_total += axis_2 * -eval_joint_force(
angles[2],
wp.dot(axis_2, w_err),
joint_act[i_2],
joint_target_ke[i_2],
joint_target_kd[i_2],
joint_limit_lower[i_2],
joint_limit_upper[i_2],
joint_limit_ke[i_2],
joint_limit_kd[i_2],
joint_axis_mode[i_2],
)
# write forces
if c_parent >= 0:
wp.atomic_add(body_f, c_parent, wp.spatial_vector(t_total + wp.cross(r_p, f_total), f_total))
wp.atomic_sub(body_f, c_child, wp.spatial_vector(t_total + wp.cross(r_c, f_total), f_total))
@wp.func
def compute_muscle_force(
i: int,
body_X_s: wp.array(dtype=wp.transform),
body_v_s: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
muscle_links: wp.array(dtype=int),
muscle_points: wp.array(dtype=wp.vec3),
muscle_activation: float,
body_f_s: wp.array(dtype=wp.spatial_vector),
):
link_0 = muscle_links[i]
link_1 = muscle_links[i + 1]
if link_0 == link_1:
return 0
r_0 = muscle_points[i]
r_1 = muscle_points[i + 1]
xform_0 = body_X_s[link_0]
xform_1 = body_X_s[link_1]
pos_0 = wp.transform_point(xform_0, r_0 - body_com[link_0])
pos_1 = wp.transform_point(xform_1, r_1 - body_com[link_1])
n = wp.normalize(pos_1 - pos_0)
# todo: add passive elastic and viscosity terms
f = n * muscle_activation
wp.atomic_sub(body_f_s, link_0, wp.spatial_vector(f, wp.cross(pos_0, f)))
wp.atomic_add(body_f_s, link_1, wp.spatial_vector(f, wp.cross(pos_1, f)))
@wp.kernel
def eval_muscles(
body_X_s: wp.array(dtype=wp.transform),
body_v_s: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
muscle_start: wp.array(dtype=int),
muscle_params: wp.array(dtype=float),
muscle_links: wp.array(dtype=int),
muscle_points: wp.array(dtype=wp.vec3),
muscle_activation: wp.array(dtype=float),
# output
body_f_s: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
m_start = muscle_start[tid]
m_end = muscle_start[tid + 1] - 1
activation = muscle_activation[tid]
for i in range(m_start, m_end):
compute_muscle_force(i, body_X_s, body_v_s, body_com, muscle_links, muscle_points, activation, body_f_s)
def eval_spring_forces(model: Model, state: State, particle_f: wp.array):
if model.spring_count:
wp.launch(
kernel=eval_springs,
dim=model.spring_count,
inputs=[
state.particle_q,
state.particle_qd,
model.spring_indices,
model.spring_rest_length,
model.spring_stiffness,
model.spring_damping,
],
outputs=[particle_f],
device=model.device,
)
def eval_triangle_forces(model: Model, state: State, control: Control, particle_f: wp.array):
if model.tri_count:
wp.launch(
kernel=eval_triangles,
dim=model.tri_count,
inputs=[
state.particle_q,
state.particle_qd,
model.tri_indices,
model.tri_poses,
control.tri_activations,
model.tri_materials,
],
outputs=[particle_f],
device=model.device,
)
def eval_triangle_contact_forces(model: Model, state: State, particle_f: wp.array):
if model.enable_tri_collisions:
wp.launch(
kernel=eval_triangles_contact,
dim=model.tri_count * model.particle_count,
inputs=[
model.particle_count,
state.particle_q,
state.particle_qd,
model.tri_indices,
model.tri_materials,
],
outputs=[particle_f],
device=model.device,
)
def eval_bending_forces(model: Model, state: State, particle_f: wp.array):
if model.edge_count:
wp.launch(
kernel=eval_bending,
dim=model.edge_count,
inputs=[
state.particle_q,
state.particle_qd,
model.edge_indices,
model.edge_rest_angle,
model.edge_bending_properties,
],
outputs=[particle_f],
device=model.device,
)
def eval_particle_ground_contact_forces(model: Model, state: State, particle_f: wp.array):
if model.ground and model.particle_count:
wp.launch(
kernel=eval_particle_ground_contacts,
dim=model.particle_count,
inputs=[
state.particle_q,
state.particle_qd,
model.particle_radius,
model.particle_flags,
model.soft_contact_ke,
model.soft_contact_kd,
model.soft_contact_kf,
model.soft_contact_mu,
model.ground_plane,
],
outputs=[particle_f],
device=model.device,
)
def eval_tetrahedral_forces(model: Model, state: State, control: Control, particle_f: wp.array):
if model.tet_count:
wp.launch(
kernel=eval_tetrahedra,
dim=model.tet_count,
inputs=[
state.particle_q,
state.particle_qd,
model.tet_indices,
model.tet_poses,
control.tet_activations,
model.tet_materials,
],
outputs=[particle_f],
device=model.device,
)
def eval_body_contact_forces(model: Model, state: State, particle_f: wp.array):
if model.rigid_contact_max and (
model.ground and model.shape_ground_contact_pair_count or model.shape_contact_pair_count
):
wp.launch(
kernel=eval_rigid_contacts,
dim=model.rigid_contact_max,
inputs=[
state.body_q,
state.body_qd,
model.body_com,
model.shape_materials,
model.shape_geo,
model.shape_body,
model.rigid_contact_count,
model.rigid_contact_point0,
model.rigid_contact_point1,
model.rigid_contact_normal,
model.rigid_contact_shape0,
model.rigid_contact_shape1,
False,
],
outputs=[state.body_f],
device=model.device,
)
def eval_body_joint_forces(model: Model, state: State, control: Control, body_f: wp.array):
if model.joint_count:
wp.launch(
kernel=eval_body_joints,
dim=model.joint_count,
inputs=[
state.body_q,
state.body_qd,
model.body_com,
model.joint_qd_start,
model.joint_type,
model.joint_enabled,
model.joint_child,
model.joint_parent,
model.joint_X_p,
model.joint_X_c,
model.joint_axis,
model.joint_axis_start,
model.joint_axis_dim,
model.joint_axis_mode,
control.joint_act,
model.joint_target_ke,
model.joint_target_kd,
model.joint_limit_lower,
model.joint_limit_upper,
model.joint_limit_ke,
model.joint_limit_kd,
model.joint_attach_ke,
model.joint_attach_kd,
],
outputs=[body_f],
device=model.device,
)
def eval_particle_body_contact_forces(model: Model, state: State, particle_f: wp.array, body_f: wp.array):
if model.particle_count and model.shape_count > 1:
wp.launch(
kernel=eval_particle_contacts,
dim=model.soft_contact_max,
inputs=[
state.particle_q,
state.particle_qd,
state.body_q,
state.body_qd,
model.particle_radius,
model.particle_flags,
model.body_com,
model.shape_body,
model.shape_materials,
model.soft_contact_ke,
model.soft_contact_kd,
model.soft_contact_kf,
model.soft_contact_mu,
model.particle_adhesion,
model.soft_contact_count,
model.soft_contact_particle,
model.soft_contact_shape,
model.soft_contact_body_pos,
model.soft_contact_body_vel,
model.soft_contact_normal,
model.soft_contact_max,
],
# outputs
outputs=[particle_f, body_f],
device=model.device,
)
def eval_muscle_forces(model: Model, state: State, control: Control, body_f: wp.array):
if model.muscle_count:
wp.launch(
kernel=eval_muscles,
dim=model.muscle_count,
inputs=[
state.body_q,
state.body_qd,
model.body_com,
model.muscle_start,
model.muscle_params,
model.muscle_bodies,
model.muscle_points,
control.muscle_activations,
],
outputs=[body_f],
device=model.device,
)
def compute_forces(model: Model, state: State, control: Control, particle_f: wp.array, body_f: wp.array, dt: float):
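    """Accumulate all force contributions (springs, triangle elasticity and lift/drag,
    triangle contacts, bending, tetrahedral FEM, body joints, particle interactions,
    ground and rigid contacts, particle-shape contacts) into ``particle_f`` and
    ``body_f``. Muscle forces are currently disabled."""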
# damped springs
eval_spring_forces(model, state, particle_f)
# triangle elastic and lift/drag forces
eval_triangle_forces(model, state, control, particle_f)
# triangle/triangle contacts
eval_triangle_contact_forces(model, state, particle_f)
# triangle bending
eval_bending_forces(model, state, particle_f)
# tetrahedral FEM
eval_tetrahedral_forces(model, state, control, particle_f)
# body joints
eval_body_joint_forces(model, state, control, body_f)
# particle-particle interactions
eval_particle_forces(model, state, particle_f)
# particle ground contacts
eval_particle_ground_contact_forces(model, state, particle_f)
# body contacts
eval_body_contact_forces(model, state, particle_f)
# particle shape contact
eval_particle_body_contact_forces(model, state, particle_f, body_f)
    # muscles (currently disabled)
if False:
eval_muscle_forces(model, state, control, body_f)
class SemiImplicitIntegrator(Integrator):
"""A semi-implicit integrator using symplectic Euler
    After constructing `Model` and `State` objects, this time-integrator
    may be used to advance the simulation state forward in time.
    Semi-implicit time integration is a variational integrator that
    preserves energy; however, it is not unconditionally stable and requires a time-step
    small enough to support the required stiffness and damping forces.
See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method
Example
-------
.. code-block:: python
        integrator = wp.sim.SemiImplicitIntegrator()
# simulation loop
for i in range(100):
state = integrator.simulate(model, state_in, state_out, dt)
"""
def __init__(self, angular_damping: float = 0.05):
"""
Args:
angular_damping (float, optional): Angular damping factor. Defaults to 0.05.
"""
self.angular_damping = angular_damping
def simulate(self, model: Model, state_in: State, state_out: State, dt: float, control: Control = None):
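        """Advance the simulation from ``state_in`` to ``state_out`` by a single step of size ``dt``.
        Falls back to ``model.control(clone_variables=False)`` when no ``control`` is provided.
        """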
with wp.ScopedTimer("simulate", False):
particle_f = None
body_f = None
if state_in.particle_count:
particle_f = state_in.particle_f
if state_in.body_count:
body_f = state_in.body_f
if control is None:
control = model.control(clone_variables=False)
compute_forces(model, state_in, control, particle_f, body_f, dt)
self.integrate_bodies(model, state_in, state_out, dt, self.angular_damping)
self.integrate_particles(model, state_in, state_out, dt)
return state_out
| 59,364 | Python | 29.334696 | 361 | 0.536419 |
NVIDIA/warp/warp/sim/import_urdf.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import xml.etree.ElementTree as ET
from typing import Union
import numpy as np
import warp as wp
from warp.sim.model import Mesh
def parse_urdf(
urdf_filename,
builder,
xform=None,
floating=False,
base_joint: Union[dict, str] = None,
density=1000.0,
stiffness=100.0,
damping=10.0,
armature=0.0,
contact_ke=1.0e4,
contact_kd=1.0e3,
contact_kf=1.0e2,
contact_ka=0.0,
contact_mu=0.25,
contact_restitution=0.5,
contact_thickness=0.0,
limit_ke=100.0,
limit_kd=10.0,
joint_limit_lower=-1e6,
joint_limit_upper=1e6,
scale=1.0,
parse_visuals_as_colliders=False,
force_show_colliders=False,
enable_self_collisions=True,
ignore_inertial_definitions=True,
ensure_nonstatic_links=True,
static_link_mass=1e-2,
collapse_fixed_joints=False,
):
"""
Parses a URDF file and adds the bodies and joints to the given ModelBuilder.
Args:
urdf_filename (str): The filename of the URDF file to parse.
builder (ModelBuilder): The :class:`ModelBuilder` to add the bodies and joints to.
xform (:ref:`transform <transform>`): The transform to apply to the root body.
        floating (bool): If True, the root body is attached to the world via a free joint. If False, the root body is connected via a fixed joint to the world, unless a `base_joint` is defined.
base_joint (Union[str, dict]): The joint by which the root body is connected to the world. This can be either a string defining the joint axes of a D6 joint with comma-separated positional and angular axis names (e.g. "px,py,rz" for a D6 joint with linear axes in x, y and an angular axis in z) or a dict with joint parameters (see :meth:`ModelBuilder.add_joint`).
density (float): The density of the shapes in kg/m^3 which will be used to calculate the body mass and inertia.
stiffness (float): The stiffness of the joints.
damping (float): The damping of the joints.
armature (float): The armature of the joints (bias to add to the inertia diagonals that may stabilize the simulation).
contact_ke (float): The stiffness of the shape contacts (used by the Euler integrators).
contact_kd (float): The damping of the shape contacts (used by the Euler integrators).
contact_kf (float): The friction stiffness of the shape contacts (used by the Euler integrators).
contact_ka (float): The adhesion distance of the shape contacts (used by the Euler integrators).
contact_mu (float): The friction coefficient of the shape contacts.
contact_restitution (float): The restitution coefficient of the shape contacts.
contact_thickness (float): The thickness to add to the shape geometry.
limit_ke (float): The stiffness of the joint limits (used by the Euler integrators).
limit_kd (float): The damping of the joint limits (used by the Euler integrators).
joint_limit_lower (float): The default lower joint limit if not specified in the URDF.
joint_limit_upper (float): The default upper joint limit if not specified in the URDF.
scale (float): The scaling factor to apply to the imported mechanism.
parse_visuals_as_colliders (bool): If True, the geometry defined under the `<visual>` tags is used for collision handling instead of the `<collision>` geometries.
force_show_colliders (bool): If True, the collision shapes are always shown, even if there are visual shapes.
enable_self_collisions (bool): If True, self-collisions are enabled.
ignore_inertial_definitions (bool): If True, the inertial parameters defined in the URDF are ignored and the inertia is calculated from the shape geometry.
ensure_nonstatic_links (bool): If True, links with zero mass are given a small mass (see `static_link_mass`) to ensure they are dynamic.
static_link_mass (float): The mass to assign to links with zero mass (if `ensure_nonstatic_links` is set to True).
collapse_fixed_joints (bool): If True, fixed joints are removed and the respective bodies are merged.
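    Example
    -------
    A minimal usage sketch (the URDF filename here is an illustrative placeholder):
    .. code-block:: python
        builder = wp.sim.ModelBuilder()
        wp.sim.parse_urdf("robot.urdf", builder, xform=wp.transform(), floating=True)
        model = builder.finalize()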
"""
if xform is None:
xform = wp.transform()
file = ET.parse(urdf_filename)
root = file.getroot()
contact_vars = {
"ke": contact_ke,
"kd": contact_kd,
"kf": contact_kf,
"ka": contact_ka,
"mu": contact_mu,
"restitution": contact_restitution,
"thickness": contact_thickness,
}
def parse_transform(element):
if element is None or element.find("origin") is None:
return wp.transform()
origin = element.find("origin")
xyz = origin.get("xyz") or "0 0 0"
rpy = origin.get("rpy") or "0 0 0"
xyz = [float(x) * scale for x in xyz.split()]
rpy = [float(x) for x in rpy.split()]
return wp.transform(xyz, wp.quat_rpy(*rpy))
def parse_shapes(link, geoms, density, incoming_xform=None, visible=True, just_visual=False):
shapes = []
# add geometry
for geom_group in geoms:
geo = geom_group.find("geometry")
if geo is None:
continue
tf = parse_transform(geom_group)
if incoming_xform is not None:
tf = incoming_xform * tf
for box in geo.findall("box"):
size = box.get("size") or "1 1 1"
size = [float(x) for x in size.split()]
s = builder.add_shape_box(
body=link,
pos=wp.vec3(tf.p),
rot=wp.quat(tf.q),
hx=size[0] * 0.5 * scale,
hy=size[1] * 0.5 * scale,
hz=size[2] * 0.5 * scale,
density=density,
is_visible=visible,
has_ground_collision=not just_visual,
has_shape_collision=not just_visual,
**contact_vars,
)
shapes.append(s)
for sphere in geo.findall("sphere"):
s = builder.add_shape_sphere(
body=link,
pos=wp.vec3(tf.p),
rot=wp.quat(tf.q),
radius=float(sphere.get("radius") or "1") * scale,
density=density,
is_visible=visible,
has_ground_collision=not just_visual,
has_shape_collision=not just_visual,
**contact_vars,
)
shapes.append(s)
for cylinder in geo.findall("cylinder"):
s = builder.add_shape_capsule(
body=link,
pos=wp.vec3(tf.p),
rot=wp.quat(tf.q),
radius=float(cylinder.get("radius") or "1") * scale,
half_height=float(cylinder.get("length") or "1") * 0.5 * scale,
density=density,
up_axis=2, # cylinders in URDF are aligned with z-axis
is_visible=visible,
has_ground_collision=not just_visual,
has_shape_collision=not just_visual,
**contact_vars,
)
shapes.append(s)
for mesh in geo.findall("mesh"):
filename = mesh.get("filename")
if filename is None:
continue
if filename.startswith("package://"):
fn = filename.replace("package://", "")
package_name = fn.split("/")[0]
urdf_folder = os.path.dirname(urdf_filename)
# resolve file path from package name, i.e. find
# the package folder from the URDF folder
if package_name in urdf_folder:
filename = os.path.join(urdf_folder[: urdf_folder.index(package_name)], fn)
else:
wp.utils.warn(
f'Warning: package "{package_name}" not found in URDF folder while loading mesh at "{filename}"'
)
elif filename.startswith("http://") or filename.startswith("https://"):
# download mesh
import shutil
import tempfile
import requests
with tempfile.TemporaryDirectory() as tmpdir:
# get filename extension
extension = os.path.splitext(filename)[1]
tmpfile = os.path.join(tmpdir, "mesh" + extension)
with requests.get(filename, stream=True) as r:
with open(tmpfile, "wb") as f:
shutil.copyfileobj(r.raw, f)
filename = tmpfile
else:
filename = os.path.join(os.path.dirname(urdf_filename), filename)
if not os.path.exists(filename):
wp.utils.warn(f"Warning: mesh file {filename} does not exist")
continue
import trimesh
# use force='mesh' to load the mesh as a trimesh object
# with baked in transforms, e.g. from COLLADA files
m = trimesh.load(filename, force="mesh")
scaling = mesh.get("scale") or "1 1 1"
scaling = np.array([float(x) * scale for x in scaling.split()])
if hasattr(m, "geometry"):
# multiple meshes are contained in a scene
for geom in m.geometry.values():
vertices = np.array(geom.vertices, dtype=np.float32) * scaling
faces = np.array(geom.faces.flatten(), dtype=np.int32)
mesh = Mesh(vertices, faces)
s = builder.add_shape_mesh(
body=link,
pos=wp.vec3(tf.p),
rot=wp.quat(tf.q),
mesh=mesh,
density=density,
is_visible=visible,
has_ground_collision=not just_visual,
has_shape_collision=not just_visual,
**contact_vars,
)
shapes.append(s)
else:
# a single mesh
vertices = np.array(m.vertices, dtype=np.float32) * scaling
faces = np.array(m.faces.flatten(), dtype=np.int32)
mesh = Mesh(vertices, faces)
s = builder.add_shape_mesh(
body=link,
pos=wp.vec3(tf.p),
rot=wp.quat(tf.q),
mesh=mesh,
density=density,
is_visible=visible,
has_ground_collision=not just_visual,
has_shape_collision=not just_visual,
**contact_vars,
)
shapes.append(s)
return shapes
# maps from link name -> link index
link_index = {}
visual_shapes = []
builder.add_articulation()
start_shape_count = len(builder.shape_geo_type)
# add links
for _i, urdf_link in enumerate(root.findall("link")):
name = urdf_link.get("name")
link = builder.add_body(origin=wp.transform_identity(), armature=armature, name=name)
# add ourselves to the index
link_index[name] = link
visuals = urdf_link.findall("visual")
colliders = urdf_link.findall("collision")
if parse_visuals_as_colliders:
colliders = visuals
else:
s = parse_shapes(link, visuals, density=0.0, just_visual=True)
visual_shapes.extend(s)
show_colliders = force_show_colliders
if parse_visuals_as_colliders:
show_colliders = True
elif len(visuals) == 0:
# we need to show the collision shapes since there are no visual shapes
show_colliders = True
parse_shapes(link, colliders, density=density, visible=show_colliders)
m = builder.body_mass[link]
if not ignore_inertial_definitions and urdf_link.find("inertial") is not None:
# overwrite inertial parameters if defined
inertial = urdf_link.find("inertial")
inertial_frame = parse_transform(inertial)
com = inertial_frame.p
I_m = np.zeros((3, 3))
I_m[0, 0] = float(inertial.find("inertia").get("ixx") or "0") * scale**2
I_m[1, 1] = float(inertial.find("inertia").get("iyy") or "0") * scale**2
I_m[2, 2] = float(inertial.find("inertia").get("izz") or "0") * scale**2
I_m[0, 1] = float(inertial.find("inertia").get("ixy") or "0") * scale**2
I_m[0, 2] = float(inertial.find("inertia").get("ixz") or "0") * scale**2
I_m[1, 2] = float(inertial.find("inertia").get("iyz") or "0") * scale**2
I_m[1, 0] = I_m[0, 1]
I_m[2, 0] = I_m[0, 2]
I_m[2, 1] = I_m[1, 2]
rot = wp.quat_to_matrix(inertial_frame.q)
I_m = rot @ wp.mat33(I_m)
m = float(inertial.find("mass").get("value") or "0")
builder.body_mass[link] = m
builder.body_inv_mass[link] = 1.0 / m
builder.body_com[link] = com
builder.body_inertia[link] = I_m
builder.body_inv_inertia[link] = wp.inverse(I_m)
if m == 0.0 and ensure_nonstatic_links:
# set the mass to something nonzero to ensure the body is dynamic
m = static_link_mass
# cube with side length 0.5
I_m = wp.mat33(np.eye(3)) * m / 12.0 * (0.5 * scale) ** 2 * 2.0
I_m += wp.mat33(armature * np.eye(3))
builder.body_mass[link] = m
builder.body_inv_mass[link] = 1.0 / m
builder.body_inertia[link] = I_m
builder.body_inv_inertia[link] = wp.inverse(I_m)
end_shape_count = len(builder.shape_geo_type)
# find joints per body
body_children = {name: [] for name in link_index.keys()}
# mapping from parent, child link names to joint
parent_child_joint = {}
joints = []
for joint in root.findall("joint"):
parent = joint.find("parent").get("link")
child = joint.find("child").get("link")
body_children[parent].append(child)
joint_data = {
"name": joint.get("name"),
"parent": parent,
"child": child,
"type": joint.get("type"),
"origin": parse_transform(joint),
"damping": damping,
"friction": 0.0,
"limit_lower": joint_limit_lower,
"limit_upper": joint_limit_upper,
}
if joint.find("axis") is not None:
joint_data["axis"] = joint.find("axis").get("xyz")
joint_data["axis"] = np.array([float(x) for x in joint_data["axis"].split()])
if joint.find("dynamics") is not None:
dynamics = joint.find("dynamics")
joint_data["damping"] = float(dynamics.get("damping") or str(damping))
joint_data["friction"] = float(dynamics.get("friction") or "0")
if joint.find("limit") is not None:
limit = joint.find("limit")
joint_data["limit_lower"] = float(limit.get("lower") or "-1e6")
joint_data["limit_upper"] = float(limit.get("upper") or "1e6")
if joint.find("mimic") is not None:
mimic = joint.find("mimic")
joint_data["mimic_joint"] = mimic.get("joint")
joint_data["mimic_multiplier"] = float(mimic.get("multiplier") or "1")
joint_data["mimic_offset"] = float(mimic.get("offset") or "0")
parent_child_joint[(parent, child)] = joint_data
joints.append(joint_data)
# topological sorting of joints because the FK solver will resolve body transforms
# in joint order and needs the parent link transform to be resolved before the child
visited = {name: False for name in link_index.keys()}
sorted_joints = []
# depth-first search
def dfs(joint):
link = joint["child"]
if visited[link]:
return
visited[link] = True
for child in body_children[link]:
if not visited[child]:
dfs(parent_child_joint[(link, child)])
sorted_joints.insert(0, joint)
# start DFS from each unvisited joint
for joint in joints:
if not visited[joint["parent"]]:
dfs(joint)
# add base joint
if len(sorted_joints) > 0:
base_link_name = sorted_joints[0]["parent"]
else:
base_link_name = next(iter(link_index.keys()))
root = link_index[base_link_name]
if base_joint is not None:
        # in case of a given base joint, the position is applied first; the rotation is
        # applied after the base joint itself so that its axis is not rotated
base_parent_xform = wp.transform(xform.p, wp.quat_identity())
base_child_xform = wp.transform((0.0, 0.0, 0.0), wp.quat_inverse(xform.q))
if isinstance(base_joint, str):
axes = base_joint.lower().split(",")
axes = [ax.strip() for ax in axes]
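            # e.g. base_joint="px,py,rz" yields linear_axes=["x", "y"] and angular_axes=["z"]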
linear_axes = [ax[-1] for ax in axes if ax[0] in {"l", "p"}]
angular_axes = [ax[-1] for ax in axes if ax[0] in {"a", "r"}]
axes = {
"x": [1.0, 0.0, 0.0],
"y": [0.0, 1.0, 0.0],
"z": [0.0, 0.0, 1.0],
}
builder.add_joint_d6(
linear_axes=[wp.sim.JointAxis(axes[a]) for a in linear_axes],
angular_axes=[wp.sim.JointAxis(axes[a]) for a in angular_axes],
parent_xform=base_parent_xform,
child_xform=base_child_xform,
parent=-1,
child=root,
name="base_joint",
)
elif isinstance(base_joint, dict):
base_joint["parent"] = -1
base_joint["child"] = root
base_joint["parent_xform"] = base_parent_xform
base_joint["child_xform"] = base_child_xform
base_joint["name"] = "base_joint"
builder.add_joint(**base_joint)
else:
raise ValueError(
"base_joint must be a comma-separated string of joint axes or a dict with joint parameters"
)
elif floating:
builder.add_joint_free(root, name="floating_base")
# set dofs to transform
start = builder.joint_q_start[root]
builder.joint_q[start + 0] = xform.p[0]
builder.joint_q[start + 1] = xform.p[1]
builder.joint_q[start + 2] = xform.p[2]
builder.joint_q[start + 3] = xform.q[0]
builder.joint_q[start + 4] = xform.q[1]
builder.joint_q[start + 5] = xform.q[2]
builder.joint_q[start + 6] = xform.q[3]
else:
builder.add_joint_fixed(-1, root, parent_xform=xform, name="fixed_base")
# add joints, in topological order starting from root body
for joint in sorted_joints:
parent = link_index[joint["parent"]]
child = link_index[joint["child"]]
if child == -1:
# we skipped the insertion of the child body
continue
lower = joint["limit_lower"]
upper = joint["limit_upper"]
joint_damping = joint["damping"]
parent_xform = joint["origin"]
child_xform = wp.transform_identity()
joint_mode = wp.sim.JOINT_MODE_FORCE
if stiffness > 0.0:
joint_mode = wp.sim.JOINT_MODE_TARGET_POSITION
joint_params = {
"parent": parent,
"child": child,
"parent_xform": parent_xform,
"child_xform": child_xform,
"name": joint["name"],
"armature": armature,
}
if joint["type"] == "revolute" or joint["type"] == "continuous":
builder.add_joint_revolute(
axis=joint["axis"],
target_ke=stiffness,
target_kd=joint_damping,
limit_lower=lower,
limit_upper=upper,
limit_ke=limit_ke,
limit_kd=limit_kd,
mode=joint_mode,
**joint_params,
)
elif joint["type"] == "prismatic":
builder.add_joint_prismatic(
axis=joint["axis"],
target_ke=stiffness,
target_kd=joint_damping,
limit_lower=lower * scale,
limit_upper=upper * scale,
limit_ke=limit_ke,
limit_kd=limit_kd,
mode=joint_mode,
**joint_params,
)
elif joint["type"] == "fixed":
builder.add_joint_fixed(**joint_params)
elif joint["type"] == "floating":
builder.add_joint_free(**joint_params)
elif joint["type"] == "planar":
# find plane vectors perpendicular to axis
axis = np.array(joint["axis"])
axis /= np.linalg.norm(axis)
# create helper vector that is not parallel to the axis
helper = np.array([1, 0, 0]) if np.allclose(axis, [0, 1, 0]) else np.array([0, 1, 0])
u = np.cross(helper, axis)
u /= np.linalg.norm(u)
v = np.cross(axis, u)
v /= np.linalg.norm(v)
builder.add_joint_d6(
linear_axes=[
wp.sim.JointAxis(
u, limit_lower=lower * scale, limit_upper=upper * scale, limit_ke=limit_ke, limit_kd=limit_kd
),
wp.sim.JointAxis(
v, limit_lower=lower * scale, limit_upper=upper * scale, limit_ke=limit_ke, limit_kd=limit_kd
),
],
**joint_params,
)
else:
raise Exception("Unsupported joint type: " + joint["type"])
for i in range(start_shape_count, end_shape_count):
for j in visual_shapes:
builder.shape_collision_filter_pairs.add((i, j))
if not enable_self_collisions:
for i in range(start_shape_count, end_shape_count):
for j in range(i + 1, end_shape_count):
builder.shape_collision_filter_pairs.add((i, j))
if collapse_fixed_joints:
builder.collapse_fixed_joints()
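# Example usage (illustrative sketch, not part of the original importer): importing a
# URDF file into a ModelBuilder with a planar base expressed via the comma-separated
# axis convention parsed above ("p"/"l" = prismatic/linear, "r"/"a" = revolute/angular).
# The keyword names used below (builder, xform, base_joint) are assumptions inferred
# from the variables in this function, not a verified signature.
def _example_parse_urdf_usage(urdf_filename="robot.urdf"):
    import warp.sim
    builder = warp.sim.ModelBuilder()
    parse_urdf(
        urdf_filename,
        builder,
        xform=wp.transform_identity(),
        base_joint="px, py, rz",  # planar base: prismatic x/y, revolute about z
    )
    return builder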
| 23,095 | Python | 42.009311 | 372 | 0.542368 |
NVIDIA/warp/warp/sim/inertia.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Helper functions for computing rigid body inertia properties."""
import math
from typing import List, Union
import numpy as np
import warp as wp
@wp.func
def triangle_inertia(
p: wp.vec3,
q: wp.vec3,
r: wp.vec3,
density: float,
com: wp.vec3,
# outputs
mass: wp.array(dtype=float, ndim=1),
inertia: wp.array(dtype=wp.mat33, ndim=1),
):
pcom = p - com
qcom = q - com
rcom = r - com
Dm = wp.mat33(pcom[0], qcom[0], rcom[0], pcom[1], qcom[1], rcom[1], pcom[2], qcom[2], rcom[2])
volume = wp.abs(wp.determinant(Dm) / 6.0)
# accumulate mass
wp.atomic_add(mass, 0, 4.0 * density * volume)
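    # order-2 quadrature: sample points sit at a fraction alpha along the segments
    # from the tetrahedron centroid towards its four vertices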
alpha = wp.sqrt(5.0) / 5.0
mid = (com + p + q + r) / 4.0
off_mid = mid - com
# displacement of quadrature point from COM
d0 = alpha * (p - mid) + off_mid
d1 = alpha * (q - mid) + off_mid
d2 = alpha * (r - mid) + off_mid
d3 = alpha * (com - mid) + off_mid
# accumulate inertia
identity = wp.mat33(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
I = wp.dot(d0, d0) * identity - wp.outer(d0, d0)
I += wp.dot(d1, d1) * identity - wp.outer(d1, d1)
I += wp.dot(d2, d2) * identity - wp.outer(d2, d2)
I += wp.dot(d3, d3) * identity - wp.outer(d3, d3)
wp.atomic_add(inertia, 0, (density * volume) * I)
return volume
@wp.kernel
def compute_solid_mesh_inertia(
# inputs
com: wp.vec3,
weight: float,
indices: wp.array(dtype=int, ndim=1),
vertices: wp.array(dtype=wp.vec3, ndim=1),
# outputs
mass: wp.array(dtype=float, ndim=1),
inertia: wp.array(dtype=wp.mat33, ndim=1),
volume: wp.array(dtype=float, ndim=1),
):
i = wp.tid()
p = vertices[indices[i * 3 + 0]]
q = vertices[indices[i * 3 + 1]]
r = vertices[indices[i * 3 + 2]]
vol = triangle_inertia(p, q, r, weight, com, mass, inertia)
wp.atomic_add(volume, 0, vol)
@wp.kernel
def compute_hollow_mesh_inertia(
# inputs
com: wp.vec3,
density: float,
indices: wp.array(dtype=int, ndim=1),
vertices: wp.array(dtype=wp.vec3, ndim=1),
thickness: wp.array(dtype=float, ndim=1),
# outputs
mass: wp.array(dtype=float, ndim=1),
inertia: wp.array(dtype=wp.mat33, ndim=1),
volume: wp.array(dtype=float, ndim=1),
):
tid = wp.tid()
i = indices[tid * 3 + 0]
j = indices[tid * 3 + 1]
k = indices[tid * 3 + 2]
vi = vertices[i]
vj = vertices[j]
vk = vertices[k]
normal = -wp.normalize(wp.cross(vj - vi, vk - vi))
ti = normal * thickness[i]
tj = normal * thickness[j]
tk = normal * thickness[k]
# wedge vertices
vi0 = vi - ti
vi1 = vi + ti
vj0 = vj - tj
vj1 = vj + tj
vk0 = vk - tk
vk1 = vk + tk
triangle_inertia(vi0, vj0, vk0, density, com, mass, inertia)
triangle_inertia(vj0, vk1, vk0, density, com, mass, inertia)
triangle_inertia(vj0, vj1, vk1, density, com, mass, inertia)
triangle_inertia(vj0, vi1, vj1, density, com, mass, inertia)
triangle_inertia(vj0, vi0, vi1, density, com, mass, inertia)
triangle_inertia(vj1, vi1, vk1, density, com, mass, inertia)
triangle_inertia(vi1, vi0, vk0, density, com, mass, inertia)
triangle_inertia(vi1, vk0, vk1, density, com, mass, inertia)
# compute volume
a = wp.length(wp.cross(vj - vi, vk - vi)) * 0.5
vol = a * (thickness[i] + thickness[j] + thickness[k]) / 3.0
wp.atomic_add(volume, 0, vol)
def compute_sphere_inertia(density: float, r: float) -> tuple:
"""Helper to compute mass and inertia of a solid sphere
Args:
density: The sphere density
r: The sphere radius
Returns:
        A tuple of (mass, center of mass, inertia) with inertia specified around the origin
"""
v = 4.0 / 3.0 * math.pi * r * r * r
m = density * v
Ia = 2.0 / 5.0 * m * r * r
I = wp.mat33([[Ia, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]])
return (m, wp.vec3(), I)
def compute_capsule_inertia(density: float, r: float, h: float) -> tuple:
"""Helper to compute mass and inertia of a solid capsule extending along the y-axis
Args:
density: The capsule density
r: The capsule radius
h: The capsule height (full height of the interior cylinder)
Returns:
        A tuple of (mass, center of mass, inertia) with inertia specified around the origin
"""
ms = density * (4.0 / 3.0) * math.pi * r * r * r
mc = density * math.pi * r * r * h
# total mass
m = ms + mc
# adapted from ODE
Ia = mc * (0.25 * r * r + (1.0 / 12.0) * h * h) + ms * (0.4 * r * r + 0.375 * r * h + 0.25 * h * h)
Ib = (mc * 0.5 + ms * 0.4) * r * r
I = wp.mat33([[Ia, 0.0, 0.0], [0.0, Ib, 0.0], [0.0, 0.0, Ia]])
return (m, wp.vec3(), I)
def compute_cylinder_inertia(density: float, r: float, h: float) -> tuple:
"""Helper to compute mass and inertia of a solid cylinder extending along the y-axis
Args:
density: The cylinder density
r: The cylinder radius
h: The cylinder height (extent along the y-axis)
Returns:
        A tuple of (mass, center of mass, inertia) with inertia specified around the origin
"""
m = density * math.pi * r * r * h
Ia = 1 / 12 * m * (3 * r * r + h * h)
Ib = 1 / 2 * m * r * r
I = wp.mat33([[Ia, 0.0, 0.0], [0.0, Ib, 0.0], [0.0, 0.0, Ia]])
return (m, wp.vec3(), I)
def compute_cone_inertia(density: float, r: float, h: float) -> tuple:
"""Helper to compute mass and inertia of a solid cone extending along the y-axis
Args:
density: The cone density
r: The cone radius
h: The cone height (extent along the y-axis)
Returns:
        A tuple of (mass, center of mass, inertia) with inertia specified around the origin
"""
m = density * math.pi * r * r * h / 3.0
Ia = 1 / 20 * m * (3 * r * r + 2 * h * h)
Ib = 3 / 10 * m * r * r
I = wp.mat33([[Ia, 0.0, 0.0], [0.0, Ib, 0.0], [0.0, 0.0, Ia]])
return (m, wp.vec3(), I)
def compute_box_inertia(density: float, w: float, h: float, d: float) -> tuple:
"""Helper to compute mass and inertia of a solid box
Args:
density: The box density
w: The box width along the x-axis
h: The box height along the y-axis
d: The box depth along the z-axis
Returns:
        A tuple of (mass, center of mass, inertia) with inertia specified around the origin
"""
v = w * h * d
m = density * v
Ia = 1.0 / 12.0 * m * (h * h + d * d)
Ib = 1.0 / 12.0 * m * (w * w + d * d)
Ic = 1.0 / 12.0 * m * (w * w + h * h)
I = wp.mat33([[Ia, 0.0, 0.0], [0.0, Ib, 0.0], [0.0, 0.0, Ic]])
return (m, wp.vec3(), I)
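# Example usage (illustrative sketch, not part of the original module): the helpers
# above return (mass, center of mass, inertia about the origin). For a unit cube of
# density 1000 the mass is 1000 and each diagonal inertia entry is m * (1 + 1) / 12.
def _example_primitive_inertia():
    m_sphere, com_sphere, I_sphere = compute_sphere_inertia(density=1000.0, r=0.5)
    m_box, com_box, I_box = compute_box_inertia(density=1000.0, w=1.0, h=1.0, d=1.0)
    # m_box == 1000.0 and I_box[0, 0] == 1000.0 * 2.0 / 12.0 (up to floating point)
    return (m_sphere, I_sphere), (m_box, I_box)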
def compute_mesh_inertia(
density: float, vertices: list, indices: list, is_solid: bool = True, thickness: Union[List[float], float] = 0.001
) -> tuple:
"""Computes mass, center of mass, 3x3 inertia matrix, and volume for a mesh."""
com = wp.vec3(np.mean(vertices, 0))
indices = np.array(indices).flatten()
num_tris = len(indices) // 3
# compute signed inertia for each tetrahedron
# formed with the interior point, using an order-2
# quadrature: https://www.sciencedirect.com/science/article/pii/S0377042712001604#br000040
# Allocating for mass and inertia
I_warp = wp.zeros(1, dtype=wp.mat33)
mass_warp = wp.zeros(1, dtype=float)
vol_warp = wp.zeros(1, dtype=float)
if is_solid:
weight = 0.25
# alpha = math.sqrt(5.0) / 5.0
wp.launch(
kernel=compute_solid_mesh_inertia,
dim=num_tris,
inputs=[
com,
weight,
wp.array(indices, dtype=int),
wp.array(vertices, dtype=wp.vec3),
],
outputs=[mass_warp, I_warp, vol_warp],
)
else:
weight = 0.25 * density
if isinstance(thickness, float):
thickness = [thickness] * len(vertices)
wp.launch(
kernel=compute_hollow_mesh_inertia,
dim=num_tris,
inputs=[
com,
weight,
wp.array(indices, dtype=int),
wp.array(vertices, dtype=wp.vec3),
wp.array(thickness, dtype=float),
],
outputs=[mass_warp, I_warp, vol_warp],
)
# Extract mass and inertia and save to class attributes.
mass = float(mass_warp.numpy()[0] * density)
I = wp.mat33(*(I_warp.numpy()[0] * density))
volume = float(vol_warp.numpy()[0])
return mass, com, I, volume
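# Example usage (illustrative sketch, not part of the original module): the unit right
# tetrahedron has volume 1/6, so with density 1000 the returned mass should be close
# to 1000/6 and the center of mass close to (0.25, 0.25, 0.25).
def _example_compute_mesh_inertia():
    vertices = [
        [0.0, 0.0, 0.0],
        [1.0, 0.0, 0.0],
        [0.0, 1.0, 0.0],
        [0.0, 0.0, 1.0],
    ]
    indices = [0, 1, 2, 0, 1, 3, 0, 2, 3, 1, 2, 3]  # four triangular faces
    return compute_mesh_inertia(1000.0, vertices, indices, is_solid=True)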
def transform_inertia(m, I, p, q):
R = wp.quat_to_matrix(q)
# Steiner's theorem
return R @ I @ wp.transpose(R) + m * (wp.dot(p, p) * wp.mat33(np.eye(3)) - wp.outer(p, p))
| 9,074 | Python | 27.62776 | 118 | 0.573837 |
NVIDIA/warp/warp/sim/integrator.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
from .model import PARTICLE_FLAG_ACTIVE, Control, Model, State
@wp.kernel
def integrate_particles(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
f: wp.array(dtype=wp.vec3),
w: wp.array(dtype=float),
particle_flags: wp.array(dtype=wp.uint32),
gravity: wp.vec3,
dt: float,
v_max: float,
x_new: wp.array(dtype=wp.vec3),
v_new: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
if (particle_flags[tid] & PARTICLE_FLAG_ACTIVE) == 0:
return
x0 = x[tid]
v0 = v[tid]
f0 = f[tid]
inv_mass = w[tid]
# simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt
v1 = v0 + (f0 * inv_mass + gravity * wp.step(-inv_mass)) * dt
# enforce velocity limit to prevent instability
v1_mag = wp.length(v1)
if v1_mag > v_max:
v1 *= v_max / v1_mag
x1 = x0 + v1 * dt
x_new[tid] = x1
v_new[tid] = v1
@wp.func
def integrate_rigid_body(
q: wp.transform,
qd: wp.spatial_vector,
f: wp.spatial_vector,
com: wp.vec3,
inertia: wp.mat33,
inv_mass: float,
inv_inertia: wp.mat33,
gravity: wp.vec3,
angular_damping: float,
dt: float,
):
# unpack transform
x0 = wp.transform_get_translation(q)
r0 = wp.transform_get_rotation(q)
# unpack spatial twist
w0 = wp.spatial_top(qd)
v0 = wp.spatial_bottom(qd)
# unpack spatial wrench
t0 = wp.spatial_top(f)
f0 = wp.spatial_bottom(f)
x_com = x0 + wp.quat_rotate(r0, com)
# linear part
v1 = v0 + (f0 * inv_mass + gravity * wp.nonzero(inv_mass)) * dt
x1 = x_com + v1 * dt
# angular part (compute in body frame)
wb = wp.quat_rotate_inv(r0, w0)
tb = wp.quat_rotate_inv(r0, t0) - wp.cross(wb, inertia * wb) # coriolis forces
w1 = wp.quat_rotate(r0, wb + inv_inertia * tb * dt)
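    # first-order quaternion update, dq/dt = 0.5 * quat(w, 0) * q, followed by renormalization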
r1 = wp.normalize(r0 + wp.quat(w1, 0.0) * r0 * 0.5 * dt)
# angular damping
w1 *= 1.0 - angular_damping * dt
q_new = wp.transform(x1 - wp.quat_rotate(r1, com), r1)
qd_new = wp.spatial_vector(w1, v1)
return q_new, qd_new
# semi-implicit Euler integration
@wp.kernel
def integrate_bodies(
body_q: wp.array(dtype=wp.transform),
body_qd: wp.array(dtype=wp.spatial_vector),
body_f: wp.array(dtype=wp.spatial_vector),
body_com: wp.array(dtype=wp.vec3),
m: wp.array(dtype=float),
I: wp.array(dtype=wp.mat33),
inv_m: wp.array(dtype=float),
inv_I: wp.array(dtype=wp.mat33),
gravity: wp.vec3,
angular_damping: float,
dt: float,
# outputs
body_q_new: wp.array(dtype=wp.transform),
body_qd_new: wp.array(dtype=wp.spatial_vector),
):
tid = wp.tid()
# positions
q = body_q[tid]
qd = body_qd[tid]
f = body_f[tid]
# masses
inv_mass = inv_m[tid] # 1 / mass
inertia = I[tid]
inv_inertia = inv_I[tid] # inverse of 3x3 inertia matrix
com = body_com[tid]
q_new, qd_new = integrate_rigid_body(
q,
qd,
f,
com,
inertia,
inv_mass,
inv_inertia,
gravity,
angular_damping,
dt,
)
body_q_new[tid] = q_new
body_qd_new[tid] = qd_new
class Integrator:
"""
Generic base class for integrators. Provides methods to integrate rigid bodies and particles.
"""
def integrate_bodies(
self,
model: Model,
state_in: State,
state_out: State,
dt: float,
angular_damping: float = 0.0,
):
"""
Integrate the rigid bodies of the model.
Args:
model (Model): The model to integrate.
state_in (State): The input state.
state_out (State): The output state.
dt (float): The time step (typically in seconds).
angular_damping (float, optional): The angular damping factor. Defaults to 0.0.
"""
if model.body_count:
wp.launch(
kernel=integrate_bodies,
dim=model.body_count,
inputs=[
state_in.body_q,
state_in.body_qd,
state_in.body_f,
model.body_com,
model.body_mass,
model.body_inertia,
model.body_inv_mass,
model.body_inv_inertia,
model.gravity,
angular_damping,
dt,
],
outputs=[state_out.body_q, state_out.body_qd],
device=model.device,
)
def integrate_particles(
self,
model: Model,
state_in: State,
state_out: State,
dt: float,
):
"""
Integrate the particles of the model.
Args:
model (Model): The model to integrate.
state_in (State): The input state.
state_out (State): The output state.
dt (float): The time step (typically in seconds).
"""
if model.particle_count:
wp.launch(
kernel=integrate_particles,
dim=model.particle_count,
inputs=[
state_in.particle_q,
state_in.particle_qd,
state_in.particle_f,
model.particle_inv_mass,
model.particle_flags,
model.gravity,
dt,
model.particle_max_velocity,
],
outputs=[state_out.particle_q, state_out.particle_qd],
device=model.device,
)
def simulate(self, model: Model, state_in: State, state_out: State, dt: float, control: Control = None):
"""
Simulate the model for a given time step using the given control input.
Args:
model (Model): The model to simulate.
state_in (State): The input state.
state_out (State): The output state.
dt (float): The time step (typically in seconds).
control (Control): The control input. Defaults to `None` which means the control values from the :class:`Model` are used.
"""
raise NotImplementedError()
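# Example usage (illustrative sketch, not part of the original module): a minimal
# Integrator subclass that only advances rigid bodies and particles from the forces
# already accumulated in state_in, without evaluating any additional forces.
class _ExampleSemiImplicitIntegrator(Integrator):
    def simulate(self, model: Model, state_in: State, state_out: State, dt: float, control: Control = None):
        self.integrate_bodies(model, state_in, state_out, dt, angular_damping=0.05)
        self.integrate_particles(model, state_in, state_out, dt)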
| 6,652 | Python | 27.310638 | 133 | 0.555021 |
NVIDIA/warp/warp/fem/polynomial.py | import math
from enum import Enum
import numpy as np
class Polynomial(Enum):
"""Polynomial family defining interpolation nodes over an interval"""
GAUSS_LEGENDRE = 0
"""Gauss--Legendre 1D polynomial family (does not include endpoints)"""
LOBATTO_GAUSS_LEGENDRE = 1
"""Lobatto--Gauss--Legendre 1D polynomial family (includes endpoints)"""
EQUISPACED_CLOSED = 2
"""Closed 1D polynomial family with uniformly distributed nodes (includes endpoints)"""
EQUISPACED_OPEN = 3
"""Open 1D polynomial family with uniformly distributed nodes (does not include endpoints)"""
def __str__(self):
return self.name
def is_closed(family: Polynomial):
"""Whether the polynomial roots include interval endpoints"""
return family == Polynomial.LOBATTO_GAUSS_LEGENDRE or family == Polynomial.EQUISPACED_CLOSED
def _gauss_legendre_quadrature_1d(n: int):
if n == 1:
coords = [0.0]
weights = [2.0]
elif n == 2:
coords = [-math.sqrt(1.0 / 3), math.sqrt(1.0 / 3)]
weights = [1.0, 1.0]
elif n == 3:
coords = [0.0, -math.sqrt(3.0 / 5.0), math.sqrt(3.0 / 5.0)]
weights = [8.0 / 9.0, 5.0 / 9.0, 5.0 / 9.0]
elif n == 4:
c_a = math.sqrt(3.0 / 7.0 - 2.0 / 7.0 * math.sqrt(6.0 / 5.0))
c_b = math.sqrt(3.0 / 7.0 + 2.0 / 7.0 * math.sqrt(6.0 / 5.0))
w_a = (18.0 + math.sqrt(30.0)) / 36.0
w_b = (18.0 - math.sqrt(30.0)) / 36.0
coords = [c_a, -c_a, c_b, -c_b]
weights = [w_a, w_a, w_b, w_b]
elif n == 5:
c_a = 1.0 / 3.0 * math.sqrt(5.0 - 2.0 * math.sqrt(10.0 / 7.0))
c_b = 1.0 / 3.0 * math.sqrt(5.0 + 2.0 * math.sqrt(10.0 / 7.0))
w_a = (322.0 + 13.0 * math.sqrt(70.0)) / 900.0
w_b = (322.0 - 13.0 * math.sqrt(70.0)) / 900.0
coords = [0.0, c_a, -c_a, c_b, -c_b]
weights = [128.0 / 225.0, w_a, w_a, w_b, w_b]
else:
raise NotImplementedError
# Shift from [-1, 1] to [0, 1]
weights = 0.5 * np.array(weights)
coords = 0.5 * np.array(coords) + 0.5
return coords, weights
def _lobatto_gauss_legendre_quadrature_1d(n: int):
if n == 2:
coords = [-1.0, 1.0]
weights = [1.0, 1.0]
elif n == 3:
coords = [-1.0, 0.0, 1.0]
weights = [1.0 / 3.0, 4.0 / 3.0, 1.0 / 3.0]
elif n == 4:
coords = [-1.0, -1.0 / math.sqrt(5.0), 1.0 / math.sqrt(5.0), 1.0]
weights = [1.0 / 6.0, 5.0 / 6.0, 5.0 / 6.0, 1.0 / 6.0]
elif n == 5:
coords = [-1.0, -math.sqrt(3.0 / 7.0), 0.0, math.sqrt(3.0 / 7.0), 1.0]
weights = [1.0 / 10.0, 49.0 / 90.0, 32.0 / 45.0, 49.0 / 90.0, 1.0 / 10.0]
else:
raise NotImplementedError
# Shift from [-1, 1] to [0, 1]
weights = 0.5 * np.array(weights)
coords = 0.5 * np.array(coords) + 0.5
return coords, weights
def _uniform_open_quadrature_1d(n: int):
step = 1.0 / (n + 1)
coords = np.linspace(step, 1.0 - step, n)
weights = np.full(n, 1.0 / (n + 1))
# Boundaries have 3/2 the weight
weights[0] = 1.5 / (n + 1)
weights[-1] = 1.5 / (n + 1)
return coords, weights
def _uniform_closed_quadrature_1d(n: int):
coords = np.linspace(0.0, 1.0, n)
weights = np.full(n, 1.0 / (n - 1))
# Boundaries have half the weight
weights[0] = 0.5 / (n - 1)
weights[-1] = 0.5 / (n - 1)
return coords, weights
def _open_newton_cotes_quadrature_1d(n: int):
step = 1.0 / (n + 1)
coords = np.linspace(step, 1.0 - step, n)
# Weisstein, Eric W. "Newton-Cotes Formulas." From MathWorld--A Wolfram Web Resource.
# https://mathworld.wolfram.com/Newton-CotesFormulas.html
if n == 1:
weights = np.array([1.0])
elif n == 2:
weights = np.array([0.5, 0.5])
elif n == 3:
weights = np.array([2.0, -1.0, 2.0]) / 3.0
elif n == 4:
weights = np.array([11.0, 1.0, 1.0, 11.0]) / 24.0
elif n == 5:
weights = np.array([11.0, -14.0, 26.0, -14.0, 11.0]) / 20.0
elif n == 6:
weights = np.array([611.0, -453.0, 562.0, 562.0, -453.0, 611.0]) / 1440.0
elif n == 7:
weights = np.array([460.0, -954.0, 2196.0, -2459.0, 2196.0, -954.0, 460.0]) / 945.0
else:
raise NotImplementedError
return coords, weights
def _closed_newton_cotes_quadrature_1d(n: int):
coords = np.linspace(0.0, 1.0, n)
# OEIS: A093735, A093736
if n == 2:
weights = np.array([1.0, 1.0]) / 2.0
elif n == 3:
weights = np.array([1.0, 4.0, 1.0]) / 3.0
elif n == 4:
weights = np.array([3.0, 9.0, 9.0, 3.0]) / 8.0
elif n == 5:
weights = np.array([14.0, 64.0, 24.0, 64.0, 14.0]) / 45.0
elif n == 6:
weights = np.array([95.0 / 288.0, 125.0 / 96.0, 125.0 / 144.0, 125.0 / 144.0, 125.0 / 96.0, 95.0 / 288.0])
elif n == 7:
weights = np.array([41, 54, 27, 68, 27, 54, 41], dtype=float) / np.array(
[140, 35, 140, 35, 140, 35, 140], dtype=float
)
elif n == 8:
weights = np.array(
[
5257,
25039,
343,
20923,
20923,
343,
25039,
5257,
]
) / np.array(
[
17280,
17280,
640,
17280,
17280,
640,
17280,
17280,
],
dtype=float,
)
else:
raise NotImplementedError
# Normalize with interval length
weights = weights / (n - 1)
return coords, weights
def quadrature_1d(point_count: int, family: Polynomial):
"""Return quadrature points and weights for the given family and point count"""
if family == Polynomial.GAUSS_LEGENDRE:
return _gauss_legendre_quadrature_1d(point_count)
if family == Polynomial.LOBATTO_GAUSS_LEGENDRE:
return _lobatto_gauss_legendre_quadrature_1d(point_count)
if family == Polynomial.EQUISPACED_CLOSED:
return _closed_newton_cotes_quadrature_1d(point_count)
if family == Polynomial.EQUISPACED_OPEN:
return _open_newton_cotes_quadrature_1d(point_count)
raise NotImplementedError
def lagrange_scales(coords: np.array):
"""Return the scaling factors for Lagrange polynomials with roots at coords"""
lagrange_scale = np.empty_like(coords)
for i in range(len(coords)):
deltas = coords[i] - coords
deltas[i] = 1.0
lagrange_scale[i] = 1.0 / np.prod(deltas)
return lagrange_scale
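# Example usage (illustrative sketch, not part of the original module): a 3-point
# Gauss--Legendre rule on [0, 1] is exact for polynomials up to degree 5, so
# integrating x**4 should return 1/5 up to floating-point error.
def _example_quadrature_1d():
    coords, weights = quadrature_1d(point_count=3, family=Polynomial.GAUSS_LEGENDRE)
    return float(np.sum(weights * coords**4))  # ~0.2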
| 6,565 | Python | 29.539535 | 114 | 0.529322 |
NVIDIA/warp/warp/fem/cache.py | import bisect
import re
from copy import copy
from typing import Any, Callable, Dict, Optional, Tuple, Union
import warp as wp
_kernel_cache = {}
_struct_cache = {}
_func_cache = {}
_key_re = re.compile("[^0-9a-zA-Z_]+")
def _make_key(obj, suffix: str, use_qualified_name):
base_name = f"{obj.__module__}.{obj.__qualname__}" if use_qualified_name else obj.__name__
return _key_re.sub("", f"{base_name}_{suffix}")
def get_func(func, suffix: str, use_qualified_name: bool = False):
key = _make_key(func, suffix, use_qualified_name)
if key not in _func_cache:
_func_cache[key] = wp.Function(
func=func,
key=key,
namespace="",
module=wp.get_module(
func.__module__,
),
)
return _func_cache[key]
def dynamic_func(suffix: str, use_qualified_name=False):
def wrap_func(func: Callable):
return get_func(func, suffix=suffix, use_qualified_name=use_qualified_name)
return wrap_func
def get_kernel(
func,
suffix: str,
use_qualified_name: bool = False,
kernel_options: Dict[str, Any] = None,
):
if kernel_options is None:
kernel_options = {}
key = _make_key(func, suffix, use_qualified_name)
if key not in _kernel_cache:
# Avoid creating too long file names -- can lead to issues on Windows
# We could hash the key, but prefer to keep it human-readable
module_name = f"{func.__module__}.dyn.{key}"
module_name = module_name[:128] if len(module_name) > 128 else module_name
module = wp.get_module(module_name)
module.options = copy(wp.get_module(func.__module__).options)
module.options.update(kernel_options)
_kernel_cache[key] = wp.Kernel(func=func, key=key, module=module)
return _kernel_cache[key]
def dynamic_kernel(suffix: str, use_qualified_name=False, kernel_options: Dict[str, Any] = None):
if kernel_options is None:
kernel_options = {}
def wrap_kernel(func: Callable):
return get_kernel(func, suffix=suffix, use_qualified_name=use_qualified_name, kernel_options=kernel_options)
return wrap_kernel
def get_struct(struct: type, suffix: str, use_qualified_name: bool = False):
key = _make_key(struct, suffix, use_qualified_name)
# used in codegen
struct.__qualname__ = key
if key not in _struct_cache:
module = wp.get_module(struct.__module__)
_struct_cache[key] = wp.codegen.Struct(
cls=struct,
key=key,
module=module,
)
return _struct_cache[key]
def dynamic_struct(suffix: str, use_qualified_name=False):
def wrap_struct(struct: type):
return get_struct(struct, suffix=suffix, use_qualified_name=use_qualified_name)
return wrap_struct
def get_integrand_function(
integrand: "warp.fem.operator.Integrand", # noqa: F821
suffix: str,
func=None,
annotations=None,
code_transformers=None,
):
if code_transformers is None:
code_transformers = []
key = _make_key(integrand.func, suffix, use_qualified_name=True)
if key not in _func_cache:
_func_cache[key] = wp.Function(
func=integrand.func if func is None else func,
key=key,
namespace="",
module=integrand.module,
overloaded_annotations=annotations,
code_transformers=code_transformers,
)
return _func_cache[key]
def get_integrand_kernel(
integrand: "warp.fem.operator.Integrand", # noqa: F821
suffix: str,
kernel_fn: Optional[Callable] = None,
kernel_options: Dict[str, Any] = None,
code_transformers=None,
):
if kernel_options is None:
kernel_options = {}
if code_transformers is None:
code_transformers = []
key = _make_key(integrand.func, suffix, use_qualified_name=True)
if key not in _kernel_cache:
if kernel_fn is None:
return None
module = wp.get_module(f"{integrand.module.name}.{integrand.name}")
module.options = copy(integrand.module.options)
module.options.update(kernel_options)
_kernel_cache[key] = wp.Kernel(func=kernel_fn, key=key, module=module, code_transformers=code_transformers)
return _kernel_cache[key]
def cached_arg_value(func: Callable):
"""Decorator to be applied to member methods assembling Arg structs, so that the result gets
automatically cached for the lifetime of the parent object
"""
cache_attr = f"_{func.__name__}_cache"
def get_arg(obj, device):
if not hasattr(obj, cache_attr):
setattr(obj, cache_attr, {})
cache = getattr(obj, cache_attr, {})
device = wp.get_device(device)
if device.ordinal not in cache:
cache[device.ordinal] = func(obj, device)
return cache[device.ordinal]
return get_arg
_cached_vec_types = {}
_cached_mat_types = {}
def cached_vec_type(length, dtype):
key = (length, dtype)
if key not in _cached_vec_types:
_cached_vec_types[key] = wp.vec(length=length, dtype=dtype)
return _cached_vec_types[key]
def cached_mat_type(shape, dtype):
key = (*shape, dtype)
if key not in _cached_mat_types:
_cached_mat_types[key] = wp.mat(shape=shape, dtype=dtype)
return _cached_mat_types[key]
class Temporary:
"""Handle over a temporary array from a :class:`TemporaryStore`.
The array will be automatically returned to the temporary pool for reuse upon destruction of this object, unless
the temporary is explicitly detached from the pool using :meth:`detach`.
The temporary may also be explicitly returned to the pool before destruction using :meth:`release`.
"""
def __init__(self, array: wp.array, pool: Optional["TemporaryStore.Pool"] = None, shape=None, dtype=None):
self._raw_array = array
self._array_view = array
self._pool = pool
if shape is not None or dtype is not None:
self._view_as(shape=shape, dtype=dtype)
def detach(self) -> wp.array:
"""Detaches the temporary so it is never returned to the pool"""
if self._pool is not None:
self._pool.detach(self._raw_array)
self._pool = None
return self._array_view
def release(self):
"""Returns the temporary array to the pool"""
if self._pool is not None:
self._pool.redeem(self._raw_array)
self._pool = None
@property
def array(self) -> wp.array:
"""View of the array with desired shape and data type."""
return self._array_view
def _view_as(self, shape, dtype) -> "Temporary":
def _view_reshaped_truncated(array):
view = wp.types.array(
ptr=array.ptr,
dtype=dtype,
shape=shape,
device=array.device,
pinned=array.pinned,
capacity=array.capacity,
copy=False,
grad=None if array.grad is None else _view_reshaped_truncated(array.grad),
)
view._ref = array
return view
self._array_view = _view_reshaped_truncated(self._raw_array)
return self
def __del__(self):
self.release()
class TemporaryStore:
"""
Shared pool of temporary arrays that will be persisted and reused across invocations of ``warp.fem`` functions.
A :class:`TemporaryStore` instance may either be passed explicitly to ``warp.fem`` functions that accept such an argument, for instance :func:`.integrate.integrate`,
or can be set globally as the default store using :func:`set_default_temporary_store`.
By default, there is no default temporary store, so that temporary allocations are not persisted.
"""
_default_store: "TemporaryStore" = None
class Pool:
def __init__(self, dtype, device, pinned: bool):
self.dtype = dtype
self.device = device
self.pinned = pinned
self._pool = [] # Currently available arrays for borrowing, ordered by size
self._pool_sizes = [] # Sizes of available arrays for borrowing, ascending
self._allocs = {} # All allocated arrays, including borrowed ones
def borrow(self, shape, dtype, requires_grad: bool):
size = 1
if isinstance(shape, int):
shape = (shape,)
for d in shape:
size *= d
index = bisect.bisect_left(
a=self._pool_sizes,
x=size,
)
if index < len(self._pool):
# Big enough array found, remove from pool
array = self._pool.pop(index)
self._pool_sizes.pop(index)
if requires_grad and array.grad is None:
array.requires_grad = True
return Temporary(pool=self, array=array, shape=shape, dtype=dtype)
# No big enough array found, allocate new one
if len(self._pool) > 0:
grow_factor = 1.5
size = max(int(self._pool_sizes[-1] * grow_factor), size)
array = wp.empty(
shape=(size,), dtype=self.dtype, pinned=self.pinned, device=self.device, requires_grad=requires_grad
)
self._allocs[array.ptr] = array
return Temporary(pool=self, array=array, shape=shape, dtype=dtype)
def redeem(self, array):
# Insert back array into available pool
index = bisect.bisect_left(
a=self._pool_sizes,
x=array.size,
)
self._pool.insert(index, array)
self._pool_sizes.insert(index, array.size)
def detach(self, array):
del self._allocs[array.ptr]
def __init__(self):
self.clear()
def clear(self):
self._temporaries = {}
def borrow(self, shape, dtype, pinned: bool = False, device=None, requires_grad: bool = False) -> Temporary:
dtype = wp.types.type_to_warp(dtype)
device = wp.get_device(device)
type_length = wp.types.type_length(dtype)
key = (dtype._type_, type_length, pinned, device.ordinal)
pool = self._temporaries.get(key, None)
if pool is None:
value_type = (
cached_vec_type(length=type_length, dtype=wp.types.type_scalar_type(dtype))
if type_length > 1
else dtype
)
pool = TemporaryStore.Pool(value_type, device, pinned=pinned)
self._temporaries[key] = pool
return pool.borrow(dtype=dtype, shape=shape, requires_grad=requires_grad)
def set_default_temporary_store(temporary_store: Optional[TemporaryStore]):
"""Globally sets the default :class:`TemporaryStore` instance to use for temporary allocations in ``warp.fem`` functions.
If the default temporary store is set to ``None``, temporary allocations are not persisted unless a :class:`TemporaryStore` is provided at a per-function granularity.
"""
TemporaryStore._default_store = temporary_store
def borrow_temporary(
temporary_store: Optional[TemporaryStore],
shape: Union[int, Tuple[int]],
dtype: type,
pinned: bool = False,
requires_grad: bool = False,
device=None,
) -> Temporary:
"""
Borrows and returns a temporary array with specified attributes from a shared pool.
If an array with sufficient capacity and matching desired attributes is already available in the pool, it will be returned.
Otherwise, a new allocation will be performed.
Args:
temporary_store: the shared pool to borrow the temporary from. If `temporary_store` is ``None``, the global default temporary store, if set, will be used.
shape: desired dimensions for the temporary array
dtype: desired data type for the temporary array
pinned: whether a pinned allocation is desired
        device: device on which the memory should be allocated; if ``None``, the current device will be used.
        requires_grad: whether the temporary array should be allocated with gradient storage
"""
if temporary_store is None:
temporary_store = TemporaryStore._default_store
if temporary_store is None:
return Temporary(
array=wp.empty(shape=shape, dtype=dtype, pinned=pinned, device=device, requires_grad=requires_grad)
)
return temporary_store.borrow(shape=shape, dtype=dtype, device=device, pinned=pinned, requires_grad=requires_grad)
def borrow_temporary_like(
array: Union[wp.array, Temporary],
temporary_store: Optional[TemporaryStore],
) -> Temporary:
"""
Borrows and returns a temporary array with the same attributes as another array or temporary.
Args:
array: Warp or temporary array to read the desired attributes from
temporary_store: the shared pool to borrow the temporary from. If `temporary_store` is ``None``, the global default temporary store, if set, will be used.
"""
if isinstance(array, Temporary):
array = array.array
return borrow_temporary(
temporary_store=temporary_store,
shape=array.shape,
dtype=array.dtype,
pinned=array.pinned,
device=array.device,
requires_grad=array.requires_grad,
)
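# Example usage (illustrative sketch, not part of the original module): borrowing a
# temporary array from a shared store, releasing it, and borrowing again so that the
# second request reuses the pooled allocation instead of allocating a new buffer.
def _example_temporary_store():
    store = TemporaryStore()
    tmp = borrow_temporary(store, shape=(1024,), dtype=wp.float32)
    tmp.array.zero_()
    tmp.release()  # hand the allocation back to the pool
    tmp2 = borrow_temporary(store, shape=(512,), dtype=wp.float32)  # reuses the pooled buffer
    return tmp2.array.shape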
| 13,321 | Python | 31.975247 | 170 | 0.622251 |
NVIDIA/warp/warp/fem/__init__.py | from .cache import TemporaryStore, borrow_temporary, borrow_temporary_like, set_default_temporary_store
from .dirichlet import normalize_dirichlet_projector, project_linear_system
from .domain import BoundarySides, Cells, FrontierSides, GeometryDomain, Sides
from .field import DiscreteField, FieldLike, make_restriction, make_test, make_trial
from .geometry import (
ExplicitGeometryPartition,
Geometry,
GeometryPartition,
Grid2D,
Grid3D,
Hexmesh,
LinearGeometryPartition,
Nanogrid,
Quadmesh2D,
Tetmesh,
Trimesh2D,
)
from .integrate import integrate, interpolate
from .operator import (
D,
at_node,
average,
curl,
deformation_gradient,
degree,
div,
div_outer,
grad,
grad_average,
grad_jump,
grad_outer,
inner,
integrand,
jump,
lookup,
measure,
measure_ratio,
normal,
outer,
position,
)
from .polynomial import Polynomial
from .quadrature import ExplicitQuadrature, NodalQuadrature, PicQuadrature, Quadrature, RegularQuadrature
from .space import (
BasisSpace,
DofMapper,
ElementBasis,
FunctionSpace,
PointBasisSpace,
SkewSymmetricTensorMapper,
SpacePartition,
SpaceRestriction,
SpaceTopology,
SymmetricTensorMapper,
make_collocated_function_space,
make_polynomial_basis_space,
make_polynomial_space,
make_space_partition,
make_space_restriction,
)
from .types import Coords, Domain, ElementIndex, Field, Sample
| 1,500 | Python | 23.209677 | 105 | 0.72 |
NVIDIA/warp/warp/fem/utils.py | from typing import Any, Tuple
import numpy as np
import warp as wp
from warp.fem.cache import (
Temporary,
TemporaryStore,
borrow_temporary,
borrow_temporary_like,
)
from warp.utils import array_scan, radix_sort_pairs, runlength_encode
@wp.func
def generalized_outer(x: Any, y: Any):
"""Generalized outer product allowing for the first argument to be a scalar"""
return wp.outer(x, y)
@wp.func
def generalized_outer(x: wp.float32, y: wp.vec2):
return x * y
@wp.func
def generalized_outer(x: wp.float32, y: wp.vec3):
return x * y
@wp.func
def generalized_inner(x: Any, y: Any):
"""Generalized inner product allowing for the first argument to be a tensor"""
return wp.dot(x, y)
@wp.func
def generalized_inner(x: wp.mat22, y: wp.vec2):
return x[0] * y[0] + x[1] * y[1]
@wp.func
def generalized_inner(x: wp.mat33, y: wp.vec3):
return x[0] * y[0] + x[1] * y[1] + x[2] * y[2]
@wp.func
def apply_right(x: Any, y: Any):
"""Performs x y multiplication with y a square matrix and x either a row-vector or a matrix.
Will be removed once native @ operator is implemented.
"""
return x * y
@wp.func
def apply_right(x: wp.vec2, y: wp.mat22):
return x[0] * y[0] + x[1] * y[1]
@wp.func
def apply_right(x: wp.vec3, y: wp.mat33):
return x[0] * y[0] + x[1] * y[1] + x[2] * y[2]
@wp.func
def unit_element(template_type: Any, coord: int):
"""Returns a instance of `template_type` with a single coordinate set to 1 in the canonical basis"""
t = type(template_type)(0.0)
t[coord] = 1.0
return t
@wp.func
def unit_element(template_type: wp.float32, coord: int):
return 1.0
@wp.func
def unit_element(template_type: wp.mat22, coord: int):
t = wp.mat22(0.0)
row = coord // 2
col = coord - 2 * row
t[row, col] = 1.0
return t
@wp.func
def unit_element(template_type: wp.mat33, coord: int):
t = wp.mat33(0.0)
row = coord // 3
col = coord - 3 * row
t[row, col] = 1.0
return t
@wp.func
def symmetric_part(x: Any):
"""Symmetric part of a square tensor"""
return 0.5 * (x + wp.transpose(x))
@wp.func
def skew_part(x: wp.mat22):
"""Skew part of a 2x2 tensor as corresponding rotation angle"""
return 0.5 * (x[1, 0] - x[0, 1])
@wp.func
def skew_part(x: wp.mat33):
"""Skew part of a 3x3 tensor as the corresponding rotation vector"""
a = 0.5 * (x[2, 1] - x[1, 2])
b = 0.5 * (x[0, 2] - x[2, 0])
c = 0.5 * (x[1, 0] - x[0, 1])
return wp.vec3(a, b, c)
def compress_node_indices(
node_count: int, node_indices: wp.array(dtype=int), temporary_store: TemporaryStore = None
) -> Tuple[Temporary, Temporary, int, Temporary]:
"""
Compress an unsorted list of node indices into:
- a node_offsets array, giving for each node the start offset of corresponding indices in sorted_array_indices
- a sorted_array_indices array, listing the indices in the input array corresponding to each node
- the number of unique node indices
- a unique_node_indices array containing the sorted list of unique node indices (i.e. the list of indices i for which node_offsets[i] < node_offsets[i+1])
"""
index_count = node_indices.size
sorted_node_indices_temp = borrow_temporary(
temporary_store, shape=2 * index_count, dtype=int, device=node_indices.device
)
sorted_array_indices_temp = borrow_temporary_like(sorted_node_indices_temp, temporary_store)
sorted_node_indices = sorted_node_indices_temp.array
sorted_array_indices = sorted_array_indices_temp.array
wp.copy(dest=sorted_node_indices, src=node_indices, count=index_count)
indices_per_element = 1 if node_indices.ndim == 1 else node_indices.shape[-1]
wp.launch(
kernel=_iota_kernel,
dim=index_count,
inputs=[sorted_array_indices, indices_per_element],
device=sorted_array_indices.device,
)
# Sort indices
radix_sort_pairs(sorted_node_indices, sorted_array_indices, count=index_count)
# Build prefix sum of number of elements per node
unique_node_indices_temp = borrow_temporary(
temporary_store, shape=index_count, dtype=int, device=node_indices.device
)
node_element_counts_temp = borrow_temporary(
temporary_store, shape=index_count, dtype=int, device=node_indices.device
)
unique_node_indices = unique_node_indices_temp.array
node_element_counts = node_element_counts_temp.array
unique_node_count_dev = borrow_temporary(temporary_store, shape=(1,), dtype=int, device=sorted_node_indices.device)
runlength_encode(
sorted_node_indices,
unique_node_indices,
node_element_counts,
value_count=index_count,
run_count=unique_node_count_dev.array,
)
# Transfer unique node count to host
if node_indices.device.is_cuda:
unique_node_count_host = borrow_temporary(temporary_store, shape=(1,), dtype=int, pinned=True, device="cpu")
wp.copy(src=unique_node_count_dev.array, dest=unique_node_count_host.array, count=1)
wp.synchronize_stream(wp.get_stream(node_indices.device))
unique_node_count_dev.release()
unique_node_count = int(unique_node_count_host.array.numpy()[0])
unique_node_count_host.release()
else:
unique_node_count = int(unique_node_count_dev.array.numpy()[0])
unique_node_count_dev.release()
# Scatter seen run counts to global array of element count per node
node_offsets_temp = borrow_temporary(
temporary_store, shape=(node_count + 1), device=node_element_counts.device, dtype=int
)
node_offsets = node_offsets_temp.array
node_offsets.zero_()
wp.launch(
kernel=_scatter_node_counts,
dim=unique_node_count,
inputs=[node_element_counts, unique_node_indices, node_offsets],
device=node_offsets.device,
)
# Prefix sum of number of elements per node
array_scan(node_offsets, node_offsets, inclusive=True)
sorted_node_indices_temp.release()
node_element_counts_temp.release()
return node_offsets_temp, sorted_array_indices_temp, unique_node_count, unique_node_indices_temp
def masked_indices(
mask: wp.array, missing_index=-1, temporary_store: TemporaryStore = None
) -> Tuple[Temporary, Temporary]:
"""
From an array of boolean masks (must be either 0 or 1), returns:
- The list of indices for which the mask is 1
- A map associating to each element of the input mask array its local index if non-zero, or missing_index if zero.
"""
offsets_temp = borrow_temporary_like(mask, temporary_store)
offsets = offsets_temp.array
wp.utils.array_scan(mask, offsets, inclusive=True)
# Get back total counts on host
if offsets.device.is_cuda:
masked_count_temp = borrow_temporary(temporary_store, shape=1, dtype=int, pinned=True, device="cpu")
wp.copy(dest=masked_count_temp.array, src=offsets, src_offset=offsets.shape[0] - 1, count=1)
wp.synchronize_stream(wp.get_stream(offsets.device))
masked_count = int(masked_count_temp.array.numpy()[0])
masked_count_temp.release()
else:
masked_count = int(offsets.numpy()[-1])
# Convert counts to indices
indices_temp = borrow_temporary(temporary_store, shape=masked_count, device=mask.device, dtype=int)
wp.launch(
kernel=_masked_indices_kernel,
dim=offsets.shape,
inputs=[missing_index, mask, offsets, indices_temp.array, offsets],
device=mask.device,
)
return indices_temp, offsets_temp
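# Example usage (illustrative sketch, not part of the original module): extracting the
# indices of the non-zero entries of a 0/1 mask together with the inverse map.
def _example_masked_indices():
    mask = wp.array([0, 1, 0, 1, 1], dtype=int)
    indices, global_to_masked = masked_indices(mask)
    # indices.array -> [1, 3, 4]
    # global_to_masked.array -> [-1, 0, -1, 1, 2]
    return indices.array.numpy(), global_to_masked.array.numpy()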
def array_axpy(x: wp.array, y: wp.array, alpha: float = 1.0, beta: float = 1.0):
"""Performs y = alpha*x + beta*y"""
dtype = wp.types.type_scalar_type(x.dtype)
alpha = dtype(alpha)
beta = dtype(beta)
if not wp.types.types_equal(x.dtype, y.dtype) or x.shape != y.shape or x.device != y.device:
raise ValueError("x and y arrays must have same dat atype, shape and device")
wp.launch(kernel=_array_axpy_kernel, dim=x.shape, device=x.device, inputs=[x, y, alpha, beta])
@wp.kernel
def _iota_kernel(indices: wp.array(dtype=int), divisor: int):
indices[wp.tid()] = wp.tid() // divisor
@wp.kernel
def _scatter_node_counts(
unique_counts: wp.array(dtype=int), unique_node_indices: wp.array(dtype=int), node_counts: wp.array(dtype=int)
):
i = wp.tid()
node_counts[1 + unique_node_indices[i]] = unique_counts[i]
@wp.kernel
def _masked_indices_kernel(
missing_index: int,
mask: wp.array(dtype=int),
offsets: wp.array(dtype=int),
masked_to_global: wp.array(dtype=int),
global_to_masked: wp.array(dtype=int),
):
i = wp.tid()
if mask[i] == 0:
global_to_masked[i] = missing_index
else:
masked_idx = offsets[i] - 1
global_to_masked[i] = masked_idx
masked_to_global[masked_idx] = i
@wp.kernel
def _array_axpy_kernel(x: wp.array(dtype=Any), y: wp.array(dtype=Any), alpha: Any, beta: Any):
i = wp.tid()
y[i] = beta * y[i] + alpha * x[i]
def grid_to_tris(Nx: int, Ny: int):
"""Constructs a triangular mesh topology by dividing each cell of a dense 2D grid into two triangles.
The resulting triangles will be oriented counter-clockwise assuming that `y` is the fastest moving index direction
Args:
Nx: Resolution of the grid along `x` dimension
Ny: Resolution of the grid along `y` dimension
Returns:
Array of shape (2 * Nx * Ny, 3) containing vertex indices for each triangle
"""
cx, cy = np.meshgrid(np.arange(Nx, dtype=int), np.arange(Ny, dtype=int), indexing="ij")
vidx = np.transpose(
np.array(
[
(Ny + 1) * cx + cy,
(Ny + 1) * (cx + 1) + cy,
(Ny + 1) * (cx + 1) + (cy + 1),
(Ny + 1) * cx + cy,
(Ny + 1) * (cx + 1) + (cy + 1),
(Ny + 1) * (cx) + (cy + 1),
]
)
).reshape((-1, 3))
return vidx
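# Example usage (illustrative sketch, not part of the original module): a single grid
# cell is split into two counter-clockwise triangles over its four corner vertices.
def _example_grid_to_tris():
    tris = grid_to_tris(1, 1)
    # expected: [[0, 2, 3], [0, 3, 1]]
    return tris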
def grid_to_tets(Nx: int, Ny: int, Nz: int):
"""Constructs a tetrahedral mesh topology by diving each cell of a dense 3D grid into five tetrahedrons
The resulting tets have positive volume assuming that `z` is the fastest moving index direction
Args:
Nx: Resolution of the grid along `x` dimension
Ny: Resolution of the grid along `y` dimension
Nz: Resolution of the grid along `z` dimension
Returns:
Array of shape (5 * Nx * Ny * Nz, 4) containing vertex indices for each tet
"""
# Global node indices for each cell
cx, cy, cz = np.meshgrid(
np.arange(Nx, dtype=int), np.arange(Ny, dtype=int), np.arange(Nz, dtype=int), indexing="ij"
)
grid_vidx = np.array(
[
(Ny + 1) * (Nz + 1) * cx + (Nz + 1) * cy + cz,
(Ny + 1) * (Nz + 1) * cx + (Nz + 1) * cy + cz + 1,
(Ny + 1) * (Nz + 1) * cx + (Nz + 1) * (cy + 1) + cz,
(Ny + 1) * (Nz + 1) * cx + (Nz + 1) * (cy + 1) + cz + 1,
(Ny + 1) * (Nz + 1) * (cx + 1) + (Nz + 1) * cy + cz,
(Ny + 1) * (Nz + 1) * (cx + 1) + (Nz + 1) * cy + cz + 1,
(Ny + 1) * (Nz + 1) * (cx + 1) + (Nz + 1) * (cy + 1) + cz,
(Ny + 1) * (Nz + 1) * (cx + 1) + (Nz + 1) * (cy + 1) + cz + 1,
]
)
# decompose grid cells into 5 tets
tet_vidx = np.array(
[
[0, 1, 2, 4],
[3, 2, 1, 7],
[5, 1, 7, 4],
[6, 7, 4, 2],
[4, 1, 2, 7],
]
)
# Convert to 3d index coordinates
vidx_coords = np.array(
[
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
]
)
tet_coords = vidx_coords[tet_vidx]
# Symmetry bits for each cell
ox, oy, oz = np.meshgrid(
np.arange(Nx, dtype=int) % 2, np.arange(Ny, dtype=int) % 2, np.arange(Nz, dtype=int) % 2, indexing="ij"
)
tet_coords = np.broadcast_to(tet_coords, shape=(*ox.shape, *tet_coords.shape))
# Flip coordinates according to symmetry
ox_bk = np.broadcast_to(ox.reshape(*ox.shape, 1, 1), tet_coords.shape[:-1])
oy_bk = np.broadcast_to(oy.reshape(*oy.shape, 1, 1), tet_coords.shape[:-1])
oz_bk = np.broadcast_to(oz.reshape(*oz.shape, 1, 1), tet_coords.shape[:-1])
tet_coords_x = tet_coords[..., 0] ^ ox_bk
tet_coords_y = tet_coords[..., 1] ^ oy_bk
tet_coords_z = tet_coords[..., 2] ^ oz_bk
# Back to local vertex indices
corner_indices = 4 * tet_coords_x + 2 * tet_coords_y + tet_coords_z
# Now go from cell-local to global node indices
# There must be a nicer way than this, but for small grids this works
corner_indices = corner_indices.reshape(-1, 4)
grid_vidx = grid_vidx.reshape((8, -1, 1))
grid_vidx = np.broadcast_to(grid_vidx, shape=(8, grid_vidx.shape[1], 5))
grid_vidx = grid_vidx.reshape((8, -1))
node_indices = np.arange(corner_indices.shape[0])
tet_grid_vidx = np.transpose(
[
grid_vidx[corner_indices[:, 0], node_indices],
grid_vidx[corner_indices[:, 1], node_indices],
grid_vidx[corner_indices[:, 2], node_indices],
grid_vidx[corner_indices[:, 3], node_indices],
]
)
return tet_grid_vidx
def grid_to_quads(Nx: int, Ny: int):
"""Constructs a quadrilateral mesh topology from a dense 2D grid
The resulting quads will be indexed counter-clockwise
Args:
Nx: Resolution of the grid along `x` dimension
Ny: Resolution of the grid along `y` dimension
Returns:
Array of shape (Nx * Ny, 4) containing vertex indices for each quadrilateral
"""
quad_vtx = np.array(
[
[0, 0],
[1, 0],
[1, 1],
[0, 1],
]
).T
quads = np.stack(np.meshgrid(np.arange(0, Nx), np.arange(0, Ny), indexing="ij"))
quads_vtx_shape = (*quads.shape, quad_vtx.shape[1])
quads_vtx = np.broadcast_to(quads.reshape(*quads.shape, 1), quads_vtx_shape) + np.broadcast_to(
quad_vtx.reshape(2, 1, 1, quad_vtx.shape[1]), quads_vtx_shape
)
quad_vtx_indices = quads_vtx[0] * (Ny + 1) + quads_vtx[1]
return quad_vtx_indices.reshape(-1, 4)
def grid_to_hexes(Nx: int, Ny: int, Nz: int):
"""Constructs a hexahedral mesh topology from a dense 3D grid
The resulting hexes will be indexed following usual convention assuming that `z` is the fastest moving index direction
(counter-clockwise bottom vertices, then counter-clockwise top vertices)
Args:
Nx: Resolution of the grid along `x` dimension
Ny: Resolution of the grid along `y` dimension
Nz: Resolution of the grid along `z` dimension
Returns:
        Array of shape (Nx * Ny * Nz, 8) containing vertex indices for each hexahedron
"""
hex_vtx = np.array(
[
[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1],
]
).T
hexes = np.stack(np.meshgrid(np.arange(0, Nx), np.arange(0, Ny), np.arange(0, Nz), indexing="ij"))
hexes_vtx_shape = (*hexes.shape, hex_vtx.shape[1])
hexes_vtx = np.broadcast_to(hexes.reshape(*hexes.shape, 1), hexes_vtx_shape) + np.broadcast_to(
hex_vtx.reshape(3, 1, 1, 1, hex_vtx.shape[1]), hexes_vtx_shape
)
hexes_vtx_indices = hexes_vtx[0] * (Nz + 1) * (Ny + 1) + hexes_vtx[1] * (Nz + 1) + hexes_vtx[2]
return hexes_vtx_indices.reshape(-1, 8)
| 15,630 | Python | 30.514113 | 159 | 0.603391 |
NVIDIA/warp/warp/fem/domain.py | from enum import Enum
from typing import Union
import warp as wp
import warp.codegen
import warp.context
from warp.fem.geometry import (
Element,
Geometry,
GeometryPartition,
WholeGeometryPartition,
)
GeometryOrPartition = Union[Geometry, GeometryPartition]
class GeometryDomain:
"""Interface class for domains, i.e. (partial) views of elements in a Geometry"""
class ElementKind(Enum):
"""Possible kinds of elements contained in a domain"""
CELL = 0
SIDE = 1
def __init__(self, geometry: GeometryOrPartition):
if isinstance(geometry, GeometryPartition):
self.geometry_partition = geometry
else:
self.geometry_partition = WholeGeometryPartition(geometry)
self.geometry = self.geometry_partition.geometry
@property
def name(self) -> str:
return f"{self.geometry_partition.name}_{self.__class__.__name__}"
def __str__(self) -> str:
return self.name
def __eq__(self, other) -> bool:
return self.__class__ == other.__class__ and self.geometry_partition == other.geometry_partition
@property
def element_kind(self) -> ElementKind:
"""Kind of elements that this domain contains (cells or sides)"""
raise NotImplementedError
@property
def dimension(self) -> int:
"""Dimension of the elements of the domain"""
raise NotImplementedError
def element_count(self) -> int:
"""Number of elements in the domain"""
raise NotImplementedError
def geometry_element_count(self) -> int:
"""Number of elements in the underlying geometry"""
return self.geometry.cell_count()
def reference_element(self) -> Element:
"""Protypical element"""
raise NotImplementedError
def element_index_arg_value(self, device: warp.context.Devicelike) -> warp.codegen.StructInstance:
"""Value of the argument to be passed to device functions"""
raise NotImplementedError
def element_arg_value(self, device: warp.context.Devicelike) -> warp.codegen.StructInstance:
"""Value of the argument to be passed to device functions"""
raise NotImplementedError
ElementIndexArg: warp.codegen.Struct
"""Structure containing arguments to be passed to device functions computing element indices"""
element_index: wp.Function
"""Device function for retrieving an ElementIndex from a linearized index"""
ElementArg: warp.codegen.Struct
"""Structure containing arguments to be passed to device functions computing element geometry"""
element_measure: wp.Function
"""Device function returning the measure determinant (e.g. volume, area) at a given point"""
element_measure_ratio: wp.Function
"""Device function returning the ratio of the measure of a side to that of its neighbour cells"""
element_position: wp.Function
"""Device function returning the element position at a sample point"""
element_deformation_gradient: wp.Function
"""Device function returning the gradient of the position with respect to the element's reference space"""
element_normal: wp.Function
"""Device function returning the element normal at a sample point"""
element_lookup: wp.Function
"""Device function returning the sample point corresponding to a world position"""
class Cells(GeometryDomain):
"""A Domain containing all cells of the geometry or geometry partition"""
def __init__(self, geometry: GeometryOrPartition):
super().__init__(geometry)
@property
def element_kind(self) -> GeometryDomain.ElementKind:
return GeometryDomain.ElementKind.CELL
@property
def dimension(self) -> int:
return self.geometry.dimension
def reference_element(self) -> Element:
return self.geometry.reference_cell()
def element_count(self) -> int:
return self.geometry_partition.cell_count()
def geometry_element_count(self) -> int:
return self.geometry.cell_count()
@property
def ElementIndexArg(self) -> warp.codegen.Struct:
return self.geometry_partition.CellArg
def element_index_arg_value(self, device: warp.context.Devicelike) -> warp.codegen.StructInstance:
return self.geometry_partition.cell_arg_value(device)
@property
def element_index(self) -> wp.Function:
return self.geometry_partition.cell_index
def element_arg_value(self, device: warp.context.Devicelike) -> warp.codegen.StructInstance:
return self.geometry.cell_arg_value(device)
@property
def ElementArg(self) -> warp.codegen.Struct:
return self.geometry.CellArg
@property
def element_position(self) -> wp.Function:
return self.geometry.cell_position
@property
def element_deformation_gradient(self) -> wp.Function:
return self.geometry.cell_deformation_gradient
@property
def element_measure(self) -> wp.Function:
return self.geometry.cell_measure
@property
def element_measure_ratio(self) -> wp.Function:
return self.geometry.cell_measure_ratio
@property
def eval_normal(self) -> wp.Function:
return self.geometry.cell_normal
@property
def element_lookup(self) -> wp.Function:
return self.geometry.cell_lookup
class Sides(GeometryDomain):
"""A Domain containing all (interior and boundary) sides of the geometry or geometry partition"""
def __init__(self, geometry: GeometryOrPartition):
self.geometry = geometry
super().__init__(geometry)
@property
def element_kind(self) -> GeometryDomain.ElementKind:
return GeometryDomain.ElementKind.SIDE
@property
def dimension(self) -> int:
return self.geometry.dimension - 1
def reference_element(self) -> Element:
return self.geometry.reference_side()
def element_count(self) -> int:
return self.geometry_partition.side_count()
def geometry_element_count(self) -> int:
return self.geometry.side_count()
@property
def ElementIndexArg(self) -> warp.codegen.Struct:
return self.geometry_partition.SideArg
def element_index_arg_value(self, device: warp.context.Devicelike) -> warp.codegen.StructInstance:
return self.geometry_partition.side_arg_value(device)
@property
def element_index(self) -> wp.Function:
return self.geometry_partition.side_index
@property
def ElementArg(self) -> warp.codegen.Struct:
return self.geometry.SideArg
def element_arg_value(self, device: warp.context.Devicelike) -> warp.codegen.StructInstance:
return self.geometry.side_arg_value(device)
@property
def element_position(self) -> wp.Function:
return self.geometry.side_position
@property
def element_deformation_gradient(self) -> wp.Function:
return self.geometry.side_deformation_gradient
@property
def element_measure(self) -> wp.Function:
return self.geometry.side_measure
@property
def element_measure_ratio(self) -> wp.Function:
return self.geometry.side_measure_ratio
@property
def eval_normal(self) -> wp.Function:
return self.geometry.side_normal
class BoundarySides(Sides):
"""A Domain containing boundary sides of the geometry or geometry partition"""
def __init__(self, geometry: GeometryOrPartition):
super().__init__(geometry)
def element_count(self) -> int:
return self.geometry_partition.boundary_side_count()
def geometry_element_count(self) -> int:
return self.geometry.boundary_side_count()
@property
def element_index(self) -> wp.Function:
return self.geometry_partition.boundary_side_index
class FrontierSides(Sides):
"""A Domain containing frontier sides of the geometry partition (sides shared with at least another partition)"""
def __init__(self, geometry: GeometryOrPartition):
super().__init__(geometry)
def element_count(self) -> int:
return self.geometry_partition.frontier_side_count()
def geometry_element_count(self) -> int:
raise RuntimeError("Frontier sides not defined at the geometry level")
@property
def element_index(self) -> wp.Function:
return self.geometry_partition.frontier_side_index
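# Example usage (illustrative sketch, not part of the original module): building cell
# and boundary-side domains over a small 2D grid geometry. The Grid2D constructor
# argument name (res) is assumed from warp.fem's public API.
def _example_domains():
    from warp.fem.geometry import Grid2D
    geo = Grid2D(res=wp.vec2i(4, 4))
    cells = Cells(geometry=geo)
    boundary = BoundarySides(geometry=geo)
    return cells.element_count(), boundary.element_count()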
| 8,366 | Python | 30.813688 | 117 | 0.687306 |
NVIDIA/warp/warp/fem/operator.py | import inspect
from typing import Any, Callable
import warp as wp
from warp.fem import utils
from warp.fem.types import Domain, Field, Sample
class Integrand:
"""An integrand is a device function containing arbitrary expressions over Field and Domain variables.
It will get transformed to a proper warp.Function by resolving concrete Field types at call time.
"""
def __init__(self, func: Callable):
self.func = func
self.name = wp.codegen.make_full_qualified_name(self.func)
self.module = wp.get_module(self.func.__module__)
self.argspec = inspect.getfullargspec(self.func)
class Operator:
"""
    Operators provide syntactic sugar over Field and Domain evaluation functions and arguments
"""
def __init__(self, func: Callable, resolver: Callable):
self.func = func
self.resolver = resolver
def integrand(func: Callable):
"""Decorator for functions to be integrated (or interpolated) using warp.fem"""
itg = Integrand(func)
itg.__doc__ = func.__doc__
return itg
def operator(resolver: Callable):
"""Decorator for functions operating on Field-like or Domain-like data inside warp.fem integrands"""
def wrap_operator(func: Callable):
op = Operator(func, resolver)
op.__doc__ = func.__doc__
return op
return wrap_operator
# Domain operators
@operator(resolver=lambda dmn: dmn.element_position)
def position(domain: Domain, s: Sample):
"""Evaluates the world position of the sample point `s`"""
pass
@operator(resolver=lambda dmn: dmn.eval_normal)
def normal(domain: Domain, s: Sample):
"""Evaluates the element normal at the sample point `s`. Null for interior points."""
pass
@operator(resolver=lambda dmn: dmn.element_deformation_gradient)
def deformation_gradient(domain: Domain, s: Sample):
"""Evaluates the gradient of the domain position with respect to the element reference space at the sample point `s`"""
pass
@operator(resolver=lambda dmn: dmn.element_lookup)
def lookup(domain: Domain, x: Any) -> Sample:
"""Looks-up the sample point corresponding to a world position `x`, projecting to the closest point on the domain.
Arg:
x: world position of the point to look-up in the geometry
guess: (optional) :class:`Sample` initial guess, may help perform the query
Notes:
Currently this operator is only fully supported for :class:`Grid2D` and :class:`Grid3D` geometries.
For :class:`TriangleMesh2D` and :class:`Tetmesh` geometries, the operator requires providing `guess`.
"""
pass
@operator(resolver=lambda dmn: dmn.element_measure)
def measure(domain: Domain, s: Sample) -> float:
"""Returns the measure (volume, area, or length) determinant of an element at a sample point `s`"""
pass
@operator(resolver=lambda dmn: dmn.element_measure_ratio)
def measure_ratio(domain: Domain, s: Sample) -> float:
"""Returns the maximum ratio between the measure of this element and that of higher-dimensional neighbours."""
pass
# Field operators
# On a side, inner and outer are such that normal goes from inner to outer
@operator(resolver=lambda f: f.eval_inner)
def inner(f: Field, s: Sample):
"""Evaluates the field at a sample point `s`. On oriented sides, uses the inner element"""
pass
@operator(resolver=lambda f: f.eval_grad_inner)
def grad(f: Field, s: Sample):
"""Evaluates the field gradient at a sample point `s`. On oriented sides, uses the inner element"""
pass
@operator(resolver=lambda f: f.eval_div_inner)
def div(f: Field, s: Sample):
"""Evaluates the field divergence at a sample point `s`. On oriented sides, uses the inner element"""
pass
@operator(resolver=lambda f: f.eval_outer)
def outer(f: Field, s: Sample):
"""Evaluates the field at a sample point `s`. On oriented sides, uses the outer element. On interior points and on domain boundaries, this is equivalent to :func:`inner`."""
pass
@operator(resolver=lambda f: f.eval_grad_outer)
def grad_outer(f: Field, s: Sample):
"""Evaluates the field gradient at a sample point `s`. On oriented sides, uses the outer element. On interior points and on domain boundaries, this is equivalent to :func:`grad`."""
pass
@operator(resolver=lambda f: f.eval_div_outer)
def div_outer(f: Field, s: Sample):
"""Evaluates the field divergence at a sample point `s`. On oriented sides, uses the outer element. On interior points and on domain boundaries, this is equivalent to :func:`div`."""
pass
@operator(resolver=lambda f: f.eval_degree)
def degree(f: Field):
"""Polynomial degree of a field"""
pass
@operator(resolver=lambda f: f.at_node)
def at_node(f: Field, s: Sample):
"""For a Test or Trial field, returns a copy of the Sample `s` moved to the coordinates of the node being evaluated"""
pass
# Common derived operators, for convenience
@integrand
def D(f: Field, s: Sample):
"""Symmetric part of the (inner) gradient of the field at `s`"""
return utils.symmetric_part(grad(f, s))
@integrand
def curl(f: Field, s: Sample):
"""Skew part of the (inner) gradient of the field at `s`, as a vector such that ``wp.cross(curl(u), v) = skew(grad(u)) v``"""
return utils.skew_part(grad(f, s))
@integrand
def jump(f: Field, s: Sample):
"""Jump between inner and outer element values on an interior side. Zero for interior points or domain boundaries"""
return inner(f, s) - outer(f, s)
@integrand
def average(f: Field, s: Sample):
"""Average between inner and outer element values"""
return 0.5 * (inner(f, s) + outer(f, s))
@integrand
def grad_jump(f: Field, s: Sample):
"""Jump between inner and outer element gradients on an interior side. Zero for interior points or domain boundaries"""
return grad(f, s) - grad_outer(f, s)
@integrand
def grad_average(f: Field, s: Sample):
"""Average between inner and outer element gradients"""
return 0.5 * (grad(f, s) + grad_outer(f, s))
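# Illustrative sketch (added for documentation; not part of the original module):
# a hypothetical integrand built from the derived operators above, pairing the
# symmetric gradients of two placeholder fields `u` and `v`.
@integrand
def _example_symmetric_gradient_form(s: Sample, u: Field, v: Field):
    return wp.ddot(D(u, s), D(v, s))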
# Set default call operators for argument types, so that field(s) = inner(field, s) and domain(s) = position(domain, s)
Field.call_operator = inner
Domain.call_operator = position
| 6,207 | Python | 31.502618 | 186 | 0.697116 |
NVIDIA/warp/warp/fem/dirichlet.py | from typing import Any, Optional
import warp as wp
from warp.sparse import BsrMatrix, bsr_assign, bsr_axpy, bsr_copy, bsr_mm, bsr_mv
from warp.types import type_is_matrix, type_length
from .utils import array_axpy
def normalize_dirichlet_projector(projector_matrix: BsrMatrix, fixed_value: Optional[wp.array] = None):
"""
Scale projector so that it becomes idempotent, and apply the same scaling to fixed_value if provided
"""
if projector_matrix.nrow < projector_matrix.nnz or projector_matrix.ncol != projector_matrix.nrow:
raise ValueError("Projector must be a square diagonal matrix, with at most one non-zero block per row")
# Cast blocks to matrix type if necessary
projector_values = projector_matrix.values
if not type_is_matrix(projector_values.dtype):
projector_values = wp.array(
data=None,
ptr=projector_values.ptr,
capacity=projector_values.capacity,
device=projector_values.device,
dtype=wp.mat(shape=projector_matrix.block_shape, dtype=projector_matrix.scalar_type),
shape=projector_values.shape[0],
)
if fixed_value is None:
wp.launch(
kernel=_normalize_dirichlet_projector_kernel,
dim=projector_matrix.nrow,
device=projector_values.device,
inputs=[projector_matrix.offsets, projector_matrix.columns, projector_values],
)
else:
if fixed_value.shape[0] != projector_matrix.nrow:
raise ValueError("Fixed value array must be of length equal to the number of rows of blocks")
if type_length(fixed_value.dtype) == 1:
# array of scalars, convert to 1d array of vectors
fixed_value = wp.array(
data=None,
ptr=fixed_value.ptr,
capacity=fixed_value.capacity,
device=fixed_value.device,
dtype=wp.vec(length=projector_matrix.block_shape[0], dtype=projector_matrix.scalar_type),
shape=fixed_value.shape[0],
)
wp.launch(
kernel=_normalize_dirichlet_projector_and_values_kernel,
dim=projector_matrix.nrow,
device=projector_values.device,
inputs=[projector_matrix.offsets, projector_matrix.columns, projector_values, fixed_value],
)
def project_system_rhs(
system_matrix: BsrMatrix, system_rhs: wp.array, projector_matrix: BsrMatrix, fixed_value: Optional[wp.array] = None
):
"""Projects the right-hand-side of a linear system to enforce Dirichlet boundary conditions
    ``rhs = (I - projector) * (rhs - system * projector * fixed_value) + projector * fixed_value``
"""
rhs_tmp = wp.empty_like(system_rhs)
rhs_tmp.assign(system_rhs)
if fixed_value is None:
system_rhs.zero_()
else:
bsr_mv(A=projector_matrix, x=fixed_value, y=system_rhs, alpha=1.0, beta=0.0)
bsr_mv(A=system_matrix, x=system_rhs, y=rhs_tmp, alpha=-1.0, beta=1.0)
# here rhs_tmp = system_rhs - system_matrix * projector * fixed_value
# system_rhs = projector * fixed_value
array_axpy(x=rhs_tmp, y=system_rhs, alpha=1.0, beta=1.0)
bsr_mv(A=projector_matrix, x=rhs_tmp, y=system_rhs, alpha=-1.0, beta=1.0)
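    # here system_rhs = (I - projector) * rhs_tmp + projector * fixed_value
    #                 = (I - projector) * (rhs - system * projector * fixed_value) + projector * fixed_value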
def project_system_matrix(system_matrix: BsrMatrix, projector_matrix: BsrMatrix):
"""Projects the right-hand-side of a linear system to enforce Dirichlet boundary conditions
``system = (I - projector) * system * (I - projector) + projector``
"""
    complement_system = bsr_copy(system_matrix)
    bsr_mm(x=projector_matrix, y=system_matrix, z=complement_system, alpha=-1.0, beta=1.0)
    # here complement_system = (I - projector) * system
    bsr_assign(dest=system_matrix, src=complement_system)
    bsr_axpy(x=projector_matrix, y=system_matrix)
    # here system_matrix = (I - projector) * system + projector
    bsr_mm(x=complement_system, y=projector_matrix, z=system_matrix, alpha=-1.0, beta=1.0)
    # here system_matrix = (I - projector) * system * (I - projector) + projector
def project_linear_system(
system_matrix: BsrMatrix,
system_rhs: wp.array,
projector_matrix: BsrMatrix,
fixed_value: Optional[wp.array] = None,
normalize_projector=True,
):
"""
    Projects both the left-hand-side and right-hand-side of a linear system to enforce Dirichlet boundary conditions.
    If ``normalize_projector`` is ``True``, the projector matrix is first rescaled so that it becomes idempotent.
"""
if normalize_projector:
normalize_dirichlet_projector(projector_matrix, fixed_value)
project_system_rhs(system_matrix, system_rhs, projector_matrix, fixed_value)
project_system_matrix(system_matrix, projector_matrix)
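# Illustrative usage sketch (added for documentation; not part of the original API):
# `system_matrix`, `system_rhs`, `projector_matrix` and `fixed_value` are assumed
# to have been assembled elsewhere, e.g. by integrating over boundary sides.
def _example_apply_dirichlet_conditions(
    system_matrix: BsrMatrix, system_rhs: wp.array, projector_matrix: BsrMatrix, fixed_value: wp.array
):
    # Rescale the projector, then project both the matrix and the right-hand side;
    # the projected system can be handed to any linear solver.
    project_linear_system(system_matrix, system_rhs, projector_matrix, fixed_value, normalize_projector=True)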
@wp.kernel
def _normalize_dirichlet_projector_kernel(
offsets: wp.array(dtype=int),
columns: wp.array(dtype=int),
block_values: wp.array(dtype=Any),
):
row = wp.tid()
beg = offsets[row]
end = offsets[row + 1]
if beg == end:
return
diag = wp.lower_bound(columns, beg, end, row)
if diag < end and columns[diag] == row:
P = block_values[diag]
P_sq = P * P
trace_P = wp.trace(P)
trace_P_sq = wp.trace(P_sq)
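        # If the block is a scaled projector P = alpha * Q with Q idempotent,
        # then trace(P) / trace(P * P) == 1 / alpha, so rescaling by this ratio recovers Q.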
if wp.nonzero(trace_P_sq):
scale = trace_P / trace_P_sq
block_values[diag] = scale * P
else:
block_values[diag] = P - P
@wp.kernel
def _normalize_dirichlet_projector_and_values_kernel(
offsets: wp.array(dtype=int),
columns: wp.array(dtype=int),
block_values: wp.array(dtype=Any),
fixed_values: wp.array(dtype=Any),
):
row = wp.tid()
beg = offsets[row]
end = offsets[row + 1]
if beg == end:
return
diag = wp.lower_bound(columns, beg, end, row)
if diag < end and columns[diag] == row:
P = block_values[diag]
P_sq = P * P
trace_P = wp.trace(P)
trace_P_sq = wp.trace(P_sq)
if wp.nonzero(trace_P_sq):
scale = trace_P / trace_P_sq
block_values[diag] = scale * P
fixed_values[row] = scale * fixed_values[row]
else:
block_values[diag] = P - P
fixed_values[row] = fixed_values[row] - fixed_values[row]
| 6,037 | Python | 32.731843 | 119 | 0.638893 |
NVIDIA/warp/warp/fem/types.py | import warp as wp
# kept to avoid breaking existing example code, no longer used internally
vec2i = wp.vec2i
vec3i = wp.vec3i
vec4i = wp.vec4i
Coords = wp.vec3
OUTSIDE = wp.constant(-1.0e8)
ElementIndex = int
QuadraturePointIndex = int
NodeIndex = int
NULL_ELEMENT_INDEX = wp.constant(-1)
NULL_QP_INDEX = wp.constant(-1)
NULL_NODE_INDEX = wp.constant(-1)
DofIndex = wp.vec2i
"""Opaque descriptor for indexing degrees of freedom within elements"""
NULL_DOF_INDEX = wp.constant(DofIndex(-1, -1))
@wp.func
def get_node_index_in_element(dof_idx: DofIndex):
return dof_idx[0]
@wp.func
def get_node_coord(dof_idx: DofIndex):
return dof_idx[1]
@wp.struct
class NodeElementIndex:
domain_element_index: ElementIndex
node_index_in_element: int
@wp.struct
class Sample:
"""Per-sample point context for evaluating fields and related operators in integrands"""
element_index: ElementIndex
"""Index of the geometry element the sample point is in"""
element_coords: Coords
"""Coordinates of the sample point inside the element"""
qp_index: QuadraturePointIndex = NULL_QP_INDEX
"""If the sample corresponds to a quadrature point, its global index"""
qp_weight: float = 0.0
"""If the sample corresponds to a quadrature point, its weight"""
test_dof: DofIndex = NULL_DOF_INDEX
"""For linear of bilinear form assembly, index of the test degree-of-freedom currently being considered"""
trial_dof: DofIndex = NULL_DOF_INDEX
"""For bilinear form assembly, index of the trial degree-of-freedom currently being considered"""
@wp.func
def make_free_sample(element_index: ElementIndex, element_coords: Coords):
"""Returns a :class:`Sample` that is not associated to any quadrature point or dof"""
return Sample(element_index, element_coords, NULL_QP_INDEX, 0.0, NULL_DOF_INDEX, NULL_DOF_INDEX)
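# Illustrative sketch (added for documentation; not part of the original module):
# build a free Sample at the parametric center of a given element, outside of
# any quadrature rule. The coordinate (0.5, 0.5, 0.5) is an arbitrary example.
@wp.func
def _example_element_center_sample(element_index: ElementIndex):
    return make_free_sample(element_index, Coords(0.5, 0.5, 0.5))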
class Field:
"""
Tag for field-like integrand arguments
"""
call_operator: "warp.fem.operator.Operator" = None # noqa: F821 Set in operator.py
class Domain:
"""
Tag for domain-like integrand arguments
"""
call_operator: "warp.fem.operator.Operator" = None # noqa: F821 Set in operator.py
| 2,188 | Python | 27.064102 | 110 | 0.713437 |