NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_planar_complex.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/gemm/warp/tile_iterator_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Underlying real-valued warp-level matrix multiply
typename Operator_,
/// Transformation applied to A operand (typically folded into math instruction)
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B operand (typically folded into math instruction)
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplex {
public:
/// Underlying real-valued warp-level matrix multiply
using Operator = Operator_;
/// Shape of warp-level matrix multiply
using Shape = typename Operator::Shape;
/// Transformation applied to A operand (typically folded into math instruction)
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B operand (typically folded into math instruction)
static ComplexTransform const kTransformB = TransformB;
/// Fragment of A-operand elements
using FragmentA = ArrayPlanarComplex<typename Operator::ElementA, Operator::FragmentA::kElements>;
/// Iterator into planar complex
using IteratorA = TileIteratorPlanarComplex<typename Operator::IteratorA>;
/// Layout in memory of the A operand
using LayoutA = typename Operator::LayoutA;
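/// Fragment of B-operand elements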
using FragmentB = ArrayPlanarComplex<typename Operator::ElementB, Operator::FragmentB::kElements>;
/// Iterator into planar complex
using IteratorB = TileIteratorPlanarComplex<typename Operator::IteratorB>;
/// Layout in memory of the B operand
using LayoutB = typename Operator::LayoutB;
/// Tile iterator for accumulator
using IteratorC = TileIteratorPlanarComplex<typename Operator::IteratorC>;
/// Accumulator fragment
using FragmentC = ArrayPlanarComplex<typename Operator::ElementC, Operator::FragmentC::kElements>;
/// Layout of accumulator fragment in memory
using LayoutC = typename Operator::LayoutC;
private:
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Operator::Shape::kM / Operator::Policy::Operator::Shape::kM,
Operator::Shape::kN / Operator::Policy::Operator::Shape::kN
>;
public:
/// Ctor
CUTLASS_DEVICE
MmaPlanarComplex() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A_in,
FragmentB const &B_in,
FragmentC const &C) const {
D.real = C.real;
D.imag = C.imag;
//
// Transform fragments based on conjugate operations.
//
negate<typename FragmentA::ArrayReal> neg_A;
FragmentA frag_A;
frag_A.real = A_in.real;
if (kTransformA == ComplexTransform::kConjugate) {
frag_A.imag = neg_A(A_in.imag);
}
else {
frag_A.imag = A_in.imag;
}
FragmentB frag_B;
frag_B.real = B_in.real;
if (kTransformB == ComplexTransform::kConjugate) {
negate<typename FragmentB::ArrayReal> neg;
frag_B.imag = neg(B_in.imag);
}
else {
frag_B.imag = B_in.imag;
}
//
// Accumulated real-valued matrix multiplies
//
Operator real_mma;
// D.i += A.i * B.r
real_mma(D.imag, frag_A.imag, frag_B.real, D.imag);
// D.r += A.r * B.r
real_mma(D.real, frag_A.real, frag_B.real, D.real);
// D.i += A.r * B.i
real_mma(D.imag, frag_A.real, frag_B.imag, D.imag);
// D.r += -A.i * B.i
frag_A.imag = neg_A(frag_A.imag);
real_mma(D.real, frag_A.imag, frag_B.imag, D.real);
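// Together the four real-valued MMAs above compute
//   D += (A.r + i*A.i) * (B.r + i*B.i)
//      = (A.r*B.r - A.i*B.i) + i*(A.i*B.r + A.r*B.i),
// with any requested conjugation already folded into the negated imaginary fragments.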
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_sparse_tensor_op.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate
operations targeting sparse Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting sparse Tensor Cores.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class SparseMmaTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Equivalent base dense mma
using Base = MmaTensorOp<Shape, ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, Policy, PartitionsK_,
AccumulatorsInRowMajor, Enable>;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Base::ArchMmaOperator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename Base::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = typename Base::OperatorClass;
/// Shape of underlying instruction
using InstructionShape = typename Base::InstructionShape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Base::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Base::kTransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// Sparsity in Operand A
static int const kSparse = Policy::Operator::kSparse;
/// Meta data size in bits
static int const kMetaSizeInBits = Policy::Operator::kMetaSizeInBits;
/// Max ID2
static int const kMaxID2 = Policy::Operator::kMaxID2;
/// Data type of meta E that is moved at the same time
using ElementE =
typename cutlass::platform::conditional<kMaxID2 == 1, uint32_t,
uint16_t>::type;
/// Number of ElementA that is associated with one ElementE
static int const kElementsPerElementE =
128 / cutlass::sizeof_bits<ElementA>::value;
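// e.g. for 16-bit ElementA this is 128 / 16 = 8, and for 8-bit ElementA it is 128 / 8 = 16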
/// Meta data is essentially interleaved but mapped to ColumnMajor internally
static int const kInterleaved = 2;
/// Layout of meta E
using LayoutE = cutlass::layout::ColumnMajor;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, Operand::kA, ElementA,
LayoutA,
MatrixShape<Policy::Operator::Shape::kM,
Policy::Operator::Shape::kK / kSparse>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename Policy::Operator::ElementA, FragmentA::kElements>;
/// Iterates over the B operand in memory
using IteratorB = typename Base::IteratorB;
/// Storage for B tile
using FragmentB = typename Base::FragmentB;
/// Storage for transformed B tile
using TransformedFragmentB = typename Base::TransformedFragmentB;
/// Iterates over the C operand in memory
using IteratorC = typename Base::IteratorC;
/// Storage for C tile
using FragmentC = typename Base::FragmentC;
/// Iterates over the E operand in memory
using IteratorE = SparseMmaTensorOpMetaTileIterator<
MatrixShape<Shape::kM * kInterleaved,
Shape::kK / kSparse / kElementsPerElementE / kInterleaved>,
ElementE, LayoutE,
MatrixShape<Policy::Operator::Shape::kM,
Policy::Operator::Shape::kK / kSparse / kElementsPerElementE /
kInterleaved>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for E tile
using FragmentE = typename IteratorE::Fragment;
/// Number of mma operations performed
using MmaIterations = typename Base::MmaIterations;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
SparseMmaTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C,
FragmentE const &E
) const {
using MmaOperandA = typename Policy::Operator::FragmentA;
using MmaOperandB = typename Policy::Operator::FragmentB;
using MmaOperandC = typename Policy::Operator::FragmentC;
using MmaOperandE = typename Policy::Operator::FragmentE;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
D = C;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
MmaOperandE const *ptr_E = reinterpret_cast<MmaOperandE const *>(&E);
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
int id2 = m % kMaxID2;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
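// Columns are visited in a serpentine order (even rows ascend, odd rows descend),
// so the B fragment indexed at a row boundary is reused by the next MMA.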
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_E[(m / kMaxID2)],
id2);
} else {
mma(ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_E[(m / kMaxID2)],
id2);
}
}
}
#else
assert(0);
#endif
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
//
// Define conversions from source type to instruction type
//
FloatRoundStyle const kRoundA =
PreferredRoundingMode<typename ArchMmaOperator::ElementA,
ElementA>::kRound;
FloatRoundStyle const kRoundB =
PreferredRoundingMode<typename ArchMmaOperator::ElementB,
ElementB>::kRound;
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements / 2, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements, kRoundB>
convert_B;
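// A is converted in two halves of FragmentA::kElements / 2 elements each
// (ptr_A[0] and ptr_A[1]); B is converted in a single pass.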
Array<ElementA, FragmentA::kElements / 2> const *ptr_A =
reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A);
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *
ptr_dst_A = reinterpret_cast<Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements / 2> *>(&dst_A);
dst_B = convert_B(B);
ptr_dst_A[0] = convert_A(ptr_A[0]);
ptr_dst_A[1] = convert_A(ptr_A[1]);
#else
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/platform/platform.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads>
class MmaVoltaTensorOpMultiplicandTileIterator;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kA, Element_,
cutlass::layout::VoltaTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Shape of one individual LDS.128
// TODO: 32 and 4 are hardcoded, 32-by-4 is logical shape
using LdsShape = layout::PitchLinearShape<
32,
4
>;
// LdsShapes are arranged in the strided direction in SMEM
using LdsIterations = layout::PitchLinearShape<
InstructionShape::kStrided / LdsShape::kStrided,
Shape::kContiguous / LdsShape::kContiguous
>;
};
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Number of internal pointers needed to reference shared memory
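/// Two base pointers cover the two swizzle patterns used by operand A;
/// load_with_byte_offset() alternates between them via pointer_[s & 1].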
static int const kPointerCount = 2;
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, Layout::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, Shape::kContiguous *
InstructionShape::kStrided / kThreads * 2>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_[kPointerCount];
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) {
// swizzle patterns for operandA LDS are
// 1. (tid[4] << 3) | (tid[2:0] ^ tid[4])
// 2. (tid[4] << 3) | (tid[2:0] ^ tid[4] ^ 0b10010)
int vec_row = (lane_id >> 4); // tid[4]
int vec_col = ((lane_id & 4) >> 2); // tid[2]
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount; ++i) {
if(i == 1) {
vec_row |= 2;
}
int access_contiguous_idx = (vec_col << 2) | ((lane_id & 3) ^ vec_row);
int access_contiguous = access_contiguous_idx;
int access_strided = vec_row;
pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
int strided_offset = tile_offset.strided();
// To support 32x32 tile size
if (Shape::kContiguous == Policy::LdsShape::kContiguous) {
if (contiguous_offset % 2) {
AccessType const *tmp_pointer = pointer_[0];
pointer_[0] = pointer_[1];
pointer_[1] = tmp_pointer;
}
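// The odd bit of the contiguous offset is absorbed by the pointer swap above;
// the remaining even part advances the base pointers below.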
contiguous_offset = contiguous_offset / 2 * 2;
}
int offset = (strided_offset * InstructionShape::kStrided) * stride_ *
Layout::kElementsPerAccess +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator++() {
byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsIterations::kContiguous;
AccessType const *source_ptr = pointer_[s & 1] +
Policy::LdsShape::kContiguous * c +
Policy::LdsShape::kStrided * (s / 2) * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr));
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kB, Element_,
cutlass::layout::VoltaTensorOpMultiplicandBCongruous<
sizeof_bits<Element_>::value>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Shape of one individual LDS
// TODO: remove hardcoded 32 and 4
using LdsShape = layout::PitchLinearShape<
32,
4
>;
using LdsIterations = layout::PitchLinearShape<
Shape::kContiguous / LdsShape::kContiguous,
InstructionShape::kStrided / LdsShape::kStrided
>;
};
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, Layout::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile; requires twice the number of registers
using Fragment = Array<Element, Shape::kContiguous *
InstructionShape::kStrided / kThreads * 2>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) {
// swizzle pattern is (tid & (3 << 3) | (tid[1:0] ^ tid[4:3]))
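// e.g. lane 13 (0b01101): access_strided = (13 >> 3) & 3 = 1,
//                         access_contiguous = (13 ^ 1) & 3 = 0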
int access_strided = (lane_id >> 3) & 0x3;
int access_contiguous = ((lane_id ^ (lane_id >> 3)) & 0x3);
pointer_ = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
int strided_offset = tile_offset.strided();
int offset = (strided_offset * InstructionShape::kStrided) * stride_ *
Layout::kElementsPerAccess +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator++() {
byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsIterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::LdsShape::kContiguous / Layout::kElementsPerAccess * c +
Policy::LdsShape::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr));
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It loads from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kA, Element_,
cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaVoltaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads>;
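// For a column-major A tile, the row dimension maps to the contiguous dimension
// of the underlying pitch-linear iterator and the column dimension to the strided one.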
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It loads from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kB, Element_,
cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
sizeof_bits<Element_>::value>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaVoltaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaVoltaTensorOpAccumulatorTileIterator {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
/// Volta Tensor Op uses 32x32 interleaved tile
using InterleavedTile = MatrixShape<32, 32>;
static_assert(!(Shape::kRow % InterleavedTile::kRow) && !(Shape::kColumn % InterleavedTile::kColumn),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using TileIterations = MatrixShape<
Shape::kRow / InterleavedTile::kRow,
Shape::kColumn / InterleavedTile::kColumn
>;
using MmaIterations =
MatrixShape<InterleavedTile::kRow / InstructionShape::kM,
InterleavedTile::kColumn / InstructionShape::kN>;
};
private:
// Assume the accumulator tile is a multiple of the interleaved 32x32 tile.
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename platform::conditional<
platform::is_same<Element, float>::value,
MatrixShape<2, 2>,
MatrixShape<1, 4> >::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = MatrixShape<4, 4>;
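// Each underlying MMA leaves kElementsPerMma = 8 accumulator elements per thread,
// split into kAccumulatorPatials = 2 partials of kElementsPerPartial = 4 elements each;
// EleShapePerPatial is the per-partial footprint (2x2 for float accumulators, 1x4 otherwise).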
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, Shape::kCount / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
int accum_m, accum_n;
if (platform::is_same<Element, float>::value) {
// (quad[2],quad[0])+lane_in_quad[0]
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
// (quad[1])+lane_in_quad[1]
accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
(lane_in_quad & 2);
} else {
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0])
accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
}
MatrixCoord lane_offset(accum_m, accum_n);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
CUTLASS_PRAGMA_UNROLL
for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
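// Linear offset into the fragment for this (tile_n, tile_m, mma_n, mma_m)
// accumulator block; each block holds kElementsPerMma elements.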
int mma_accum_start =
(((tile_n * Policy::TileIterations::kRow + tile_m) *
Policy::MmaIterations::kColumn + mma_n) *
Policy::MmaIterations::kRow + mma_m) *
kElementsPerMma;
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < kAccumulatorPatials; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
int accum_m = tile_m * Policy::InterleavedTile::kRow +
mma_m * QuadShapePerPatialMma::kRow + m * 2;
int accum_n = tile_n * Policy::InterleavedTile::kColumn +
mma_n * QuadShapePerPatialMma::kColumn +
p * Policy::InterleavedTile::kColumn/2 + n;
int idx = mma_accum_start + p * kElementsPerPartial +
m * EleShapePerPatial::kColumn + n;
frag[idx] = offset_ref.at({accum_m, accum_n});
}
}
}
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
CUTLASS_PRAGMA_UNROLL
for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start =
(((tile_n * Policy::TileIterations::kRow + tile_m) *
Policy::MmaIterations::kColumn + mma_n) *
Policy::MmaIterations::kRow + mma_m) *
kElementsPerMma;
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < kAccumulatorPatials; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
int accum_m = tile_m * Policy::InterleavedTile::kRow +
mma_m * QuadShapePerPatialMma::kRow + m * 2;
int accum_n = tile_n * Policy::InterleavedTile::kColumn +
mma_n * QuadShapePerPatialMma::kColumn +
p * Policy::InterleavedTile::kColumn/2 + n;
int idx = mma_accum_start + p * kElementsPerPartial +
m * EleShapePerPatial::kColumn + n;
offset_ref.at({accum_m, accum_n}) = frag[idx];
}
}
}
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_HOST_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
void store(
    Fragment const &frag,                    ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
  /// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
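//
// Illustrative round trip through the accumulator tile iterator above (a minimal
// sketch, not part of the library API; `AccumulatorTileIterator` stands for a
// concrete instantiation of the class, and `accum_ref` / `lane_id` are assumed to
// be supplied by the enclosing warp-level epilogue):
//
//   AccumulatorTileIterator iter(accum_ref, lane_id);
//   typename AccumulatorTileIterator::Fragment frag;
//   iter.load(frag);     // per-lane gather of this thread's accumulator elements
//   // ... elementwise epilogue math on frag ...
//   iter.store(frag);    // scatter results back to the same logical coordinates
//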
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// KBlock size (in units of elements)
int KBlock>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::VoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, KBlock>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaVoltaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// KBlock size
static int const kKBlock = KBlock;
/// Layout of source tile
using Layout = cutlass::layout::VoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kKBlock>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Long Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
/// Shape of one individual LDS instruction
using LdsShape = layout::PitchLinearShape<1, 32>;
    /// Number and arrangement of LDS instructions
using LdsIterations = layout::PitchLinearShape<1, Shape::kStrided / 32>;
/// Using LDS.128
static int const kElementsPerAccess = 8;
/// Contiguous elements per line
static int const kContiguousElementsPerLine = 4;
};
private:
  /// Only the default OpDelta arrangement is supported at present.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element,
Shape::kStrided * InstructionShape::kContiguous / kThreads * 2>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
  /// Crosswise elements are arranged in a shared memory line,
  /// measured in units of AccessType
Index line_size;
/// Internal counter used to determine load addr offset
/// and when to swap higher 64bit with lower 64bit
int k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator()
: pointer_(nullptr),
stride_(0),
line_size(0),
byte_offset_(0),
k_group_idx_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: pointer_(reinterpret_cast<AccessType const *>(ref.data())),
stride_(ref.stride(0) * Policy::kElementsPerAccess),
line_size((ref.stride(0) * Policy::kContiguousElementsPerLine) /
Policy::kElementsPerAccess),
k_group_idx_(0),
byte_offset_(0) {
int quad = (lane_id / 4);
int lane_in_quad = (lane_id % 4);
int access_contiguous;
if(kOperand == Operand::kA) {
// swizzle id: tid[4]|tid[1:0]|(tid[2]^tid[4])
access_contiguous = ((quad & 0x4) << 1) + ((lane_in_quad) << 1) +
((quad & 0x1) ^ ((quad & 0x4) >> 2));
} else {
      // swizzle id: tid[4]|tid[1:0]|(tid[3]^tid[4])
access_contiguous = ((quad & 0x4) << 1) + (lane_in_quad << 1) +
((quad & 0x2) >> 1 ^ ((quad & 0x4) >> 2));
}
byte_offset_ = access_contiguous *
sizeof(Element) * Policy::kElementsPerAccess;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
int strided_offset = tile_offset.strided();
k_group_idx_ = 0;
pointer_ += contiguous_offset *
(InstructionShape::kContiguous /
Policy::kContiguousElementsPerLine) *
line_size +
strided_offset * Shape::kStrided / 2;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator++() {
k_group_idx_ = (k_group_idx_ + 1) % 8;
if (k_group_idx_ == 4 || k_group_idx_ == 0) {
byte_offset_ ^= 1 * sizeof(Element) * Policy::kElementsPerAccess;
}
pointer_ += line_size;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
  MmaVoltaTensorOpMultiplicandTileIterator &operator--() {
    assert(0);
    return *this;
  }
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsIterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::LdsShape::kContiguous * c * line_size +
Policy::LdsShape::kStrided * s / 2;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr));
// swap higher 64bit and lower 64bit
if (k_group_idx_ & 0x2) {
uint64_t *low = reinterpret_cast<uint64_t *>(&frag) + access_idx * 2;
uint64_t *high = reinterpret_cast<uint64_t *>(&frag) + access_idx * 2 + 1;
uint64_t tmp = *low;
*low = *high;
*high = tmp;
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Policy::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group;
}
};
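//
// Illustrative k-group traversal with the crosswise iterator above (a minimal
// sketch; `Iterator` is a concrete instantiation, and `kGroupsPerTile`, `smem_ref`
// and `lane_id` are assumed to come from the enclosing warp-level mainloop):
//
//   Iterator iter(smem_ref, lane_id);
//   typename Iterator::Fragment frag;
//   CUTLASS_PRAGMA_UNROLL
//   for (int k = 0; k < kGroupsPerTile; ++k) {
//     iter.set_kgroup_index(k);  // lets the compiler fold the 64-bit swap condition
//     iter.load(frag);           // LDS fetch plus conditional high/low 64-bit swap
//     ++iter;                    // advance one k-group; the byte offset toggles every fourth step
//     // ... issue the real-valued mma on frag ...
//   }
//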
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// KBlock size (in units of elements)
int KBlock>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, KBlock>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// KBlock size
static int const kKBlock = KBlock;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kKBlock>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaVoltaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
kKBlock>,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: iterator_({ref.data(), ref.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// KBlock size (in units of elements)
int KBlock>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, KBlock>,
InstructionShape_, OpDelta_, 32> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// KBlock size
static int const kKBlock = KBlock;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kKBlock>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaVoltaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
kKBlock>,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: iterator_({ref.data(), ref.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
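//
// The two layout adapters above only reinterpret coordinates before delegating to
// the pitch-linear crosswise iterator (a purely descriptive sketch of the mapping):
//
//   // ColumnMajor adapter:  add_tile_offset({row, column}) -> base {contiguous = row,    strided = column}
//   // RowMajor adapter:     add_tile_offset({row, column}) -> base {contiguous = column, strided = row}
//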
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for 'TN' arrangement
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
  /// Data type of elements
typename Element_,
/// Layout of matrix operand
typename Layout_,
  /// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaVoltaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess = 4;
private:
static int const kInterleavedTileRows = 32;
static int const kInterleavedTileColumns = 32;
static int const kInstructionsPerTile = 2;
/// Rounded up instruction counts
using TileCount = MatrixShape<
Shape::kRow / kInterleavedTileRows,
Shape::kColumn / kInterleavedTileColumns
>;
using FragmentCount = MatrixShape<
TileCount::kRow * kInstructionsPerTile,
TileCount::kColumn * kInstructionsPerTile
>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA ? FragmentCount::kRow : FragmentCount::kColumn) * kElementsPerAccess
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner(): divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner(
TensorRef const &ref,
int lane_id
):
ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) {
int quad_id = lane_id / 4;
int lane_in_quad = (lane_id % 4);
if (kOperand == Operand::kA) {
int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile + lane_in_quad;
int col_idx = 0;
origin_ = MatrixCoord(row_idx, col_idx);
}
else {
int row_idx = 0;
int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile + lane_in_quad;
origin_ = MatrixCoord(row_idx, col_idx);
}
ref_.add_coord_offset(origin_);
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), divisible_(false) {
int quad_id = lane_id / 4;
int lane_in_quad = (lane_id % 4);
if (kOperand == Operand::kA) {
int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile + lane_in_quad;
int col_idx = 0;
origin_ = MatrixCoord(row_idx, col_idx);
}
else {
int row_idx = 0;
int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile + lane_in_quad;
origin_ = MatrixCoord(row_idx, col_idx);
}
#if defined(__CUDA_ARCH__)
__syncthreads();
#endif
ref_.add_coord_offset(origin_);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator++() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator--() {
if (kOperand == Operand::kA) {
add_tile_offset({0, -1});
}
else {
add_tile_offset({-1, 0});
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(ref_.data());
int ldm = ref_.stride()[0];
if (kOperand == Operand::kA) {
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentCount::kRow; ++idx) {
int tile_idx = idx / 2;
int quad_idx = idx % 2;
int row_offset = tile_idx * kInterleavedTileRows + quad_idx * 4;
frag_ptr[idx] = access_ptr[row_offset * ldm / kElementsPerAccess];
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentCount::kColumn; ++idx) {
int tile_idx = idx / 2;
int quad_idx = idx % 2;
int col_offset = tile_idx * kInterleavedTileColumns + quad_idx * 4;
frag_ptr[idx] = access_ptr[col_offset * ldm / kElementsPerAccess];
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation
}
};
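//
// Worked example of the lane mapping used by the canonical 'TN' iterator above
// (arithmetic only; lane 13 is an arbitrary choice). For Operand::kA,
// quad_id = 13 / 4 = 3 and lane_in_quad = 13 % 4 = 1, so
//
//   row_idx = ((3 & 1) + ((3 & 4) / 2)) * 4 * kInstructionsPerTile + 1
//           = (1 + 0) * 8 + 1 = 9,   col_idx = 0
//
// i.e. lane 13 starts at logical coordinate (9, 0) of its operand tile and then
// moves in whole interleaved 32x32 tiles via add_tile_offset() / operator++().
//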
/// Tile iterator specialized for 'NT' arrangement
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
  /// Data type of elements
typename Element_,
/// Layout of matrix operand
typename Layout_,
  /// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaVoltaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess = 4;
private:
static int const kInterleavedTileRows = 32;
static int const kInterleavedTileColumns = 32;
static int const kInstructionsPerTile = 2;
/// Rounded up instruction counts
using TileCount = MatrixShape<
Shape::kRow / kInterleavedTileRows,
Shape::kColumn / kInterleavedTileColumns
>;
using FragmentCount = MatrixShape<
TileCount::kRow * kInstructionsPerTile,
TileCount::kColumn * kInstructionsPerTile
>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA ? FragmentCount::kRow : FragmentCount::kColumn) * kElementsPerAccess
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter(): divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter(
TensorRef const &ref,
int lane_id
):
ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) {
int quad_id = lane_id / 4;
int lane_in_quad = (lane_id % 4);
if (kOperand == Operand::kA) {
int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile;
int col_idx = lane_in_quad;
origin_ = MatrixCoord(row_idx, col_idx);
}
else {
int row_idx = lane_in_quad;
int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile;
origin_ = MatrixCoord(row_idx, col_idx);
}
ref_.add_coord_offset(origin_);
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), divisible_(false) {
int quad_id = lane_id / 4;
int lane_in_quad = (lane_id % 4);
if (kOperand == Operand::kA) {
int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile;
int col_idx = lane_in_quad;
origin_ = MatrixCoord(row_idx, col_idx);
}
else {
int row_idx = lane_in_quad;
int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile;
origin_ = MatrixCoord(row_idx, col_idx);
}
#if defined(__CUDA_ARCH__)
__syncthreads();
#endif
ref_.add_coord_offset(origin_);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator++() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator--() {
if (kOperand == Operand::kA) {
add_tile_offset({0, -1});
}
else {
add_tile_offset({-1, 0});
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(ref_.data());
int ldm = ref_.stride()[0];
if (kOperand == Operand::kA) {
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentCount::kRow; ++idx) {
int tile_idx = idx / 2;
int quad_idx = idx % 2;
int row_offset = tile_idx * kInterleavedTileRows;
frag_ptr[idx] = access_ptr[row_offset / kElementsPerAccess + quad_idx];
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentCount::kColumn; ++idx) {
int tile_idx = idx / 2;
int quad_idx = idx % 2;
int col_offset = tile_idx * kInterleavedTileColumns;
frag_ptr[idx] = access_ptr[col_offset / kElementsPerAccess + quad_idx];
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation
}
};
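//
// The 'NT' iterator above differs from the 'TN' variant only in how each fragment
// access is addressed: the inner ('TN') form strides through the leading dimension
// (access_ptr[offset * ldm / kElementsPerAccess]), whereas the outer ('NT') form
// stays within a line and steps along the contiguous dimension
// (access_ptr[offset / kElementsPerAccess + quad_idx]). Usage is identical for
// both (a minimal sketch; `CanonicalIterator`, `operand_ref` and `lane_id` are
// assumed to come from the surrounding warp-level mainloop):
//
//   CanonicalIterator iter(operand_ref, lane_id);
//   typename CanonicalIterator::Fragment frag;
//   iter.load(frag);   // one 4-element access per fragment entry
//   ++iter;            // advance one tile along the K dimension
//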
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_,
Operand::kA,
Element_,
cutlass::layout::RowMajor,
InstructionShape_,
OpDelta_,
32
> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner<
Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner<
Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> ;
using TensorRef = typename Base::TensorRef;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): Base(ref, lane_id) { }
};
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_,
Operand::kA,
Element_,
cutlass::layout::ColumnMajor,
InstructionShape_,
OpDelta_,
32
> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter<
Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> {
public:
using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter<
Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> ;
using TensorRef = typename Base::TensorRef;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): Base(ref, lane_id) { }
};
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kB, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OpDelta_, 32
> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner<
Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> {
public:
using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner<
Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_>;
using TensorRef = typename Base::TensorRef;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): Base(ref, lane_id) { }
};
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_>
class MmaVoltaTensorOpMultiplicandTileIterator<
Shape_, Operand::kB, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OpDelta_, 32
> : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter<
Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter<
Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_>;
using TensorRef = typename Base::TensorRef;
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaVoltaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): Base(ref, lane_id) { }
};
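//
// Summary of the canonical-layout dispatch established by the four partial
// specializations above:
//
//   Operand::kA, layout::RowMajor     -> ...CanonicalInner ('TN' arrangement)
//   Operand::kA, layout::ColumnMajor  -> ...CanonicalOuter ('NT' arrangement)
//   Operand::kB, layout::ColumnMajor  -> ...CanonicalInner ('TN' arrangement)
//   Operand::kB, layout::RowMajor     -> ...CanonicalOuter ('NT' arrangement)
//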
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 99,649 | C | 31.072739 | 112 | 0.667051 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/functional.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
/// Data type of real & imag members of complex numbers in the SourceFragment
typename RealElement,
/// Destination fragment required by the mma operation
typename DestinationFragment,
/// Source fragment holding complex<RealElement> elements
typename SourceFragment,
/// Number of mma operations performed
typename MmaIterations,
/// Shape of operand elements
typename MmaOperandShape,
/// Complex transform on A operand
ComplexTransform Transform_,
/// Operand A or Operand B
Operand Operand_,
/// Floating-point rounding style
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma;
// Partial specialization for OperandA and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kA,
Round_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kA;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRound = Round_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
  // Numeric converter MmaElement <= RealElement
using Converter = NumericConverter<MmaElement, RealElement, kRound>;
// Operand layout parameters
using SourceFragmentLayout = layout::ColumnMajor;
static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMma() {}
CUTLASS_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kRow; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r + i * MmaOperandShape::kRow;
int col = c;
// Access complex<RealElement> and apply rounding on real and imag parts
MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real());
MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest[i][pos] = a;
dest[i+MmaIterations::kRow][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b);
}
}
}
}
};
// Partial specialization for OperandB and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle Round_>
struct UnpackComplexConvertAndPackForMma <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kB,
Round_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kB;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRound = Round_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
// Numeric convertor MmaElement <= RealElement
using Converter = NumericConverter<MmaElement, RealElement, kRound>;
// Operand layout parameters
using SourceFragmentLayout = layout::RowMajor;
static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMma() {}
CUTLASS_HOST_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kColumn; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r;
int col = c + i * MmaOperandShape::kColumn;
          // Access complex<RealElement> and apply rounding on real and imag parts
MmaElement a = convert_op(source[layout(MatrixCoord{row,col})].real());
MmaElement b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest[i][pos] = a;
dest[i+MmaIterations::kColumn][pos++] = (kTransform == ComplexTransform::kConjugate ? -b : b);
}
}
}
}
};
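//
// Illustrative effect of the two helpers above (a sketch of what the loops encode,
// not additional behavior): each complex<RealElement> source element z = a + b*i is
// split into planar destination fragments so that, for the A operand with
// MmaIterations::kRow == R,
//
//   dest[i]      receives the converted real parts      a
//   dest[i + R]  receives the converted imaginary parts b  (negated when
//                kTransform == ComplexTransform::kConjugate)
//
// letting the real-valued tensor-core mma instructions consume planar-complex
// operands; the B-operand specialization does the same with kColumn in place of kRow.
//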
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
  /// Do source operands need more than one element
bool GeneralizedOperatorElements = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaComplexTensorOp;
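// The primary template above is specialized below on the operand value types:
// a generic complex<Real> path built from one-element real-valued MMA
// fragments, a complex<float> path that converts to tfloat32_t for
// mma.m16n8k8, and a complex<double> path that packs multi-element f64
// operands.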
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected planar complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(MmaOperandA::kElements == 1,
"This implementation only supports math instructions in which exactly one element is needed for the A operand."
"We can geneneralize later.");
static_assert(MmaOperandB::kElements == 1,
"This implementation only supports math instructions in which exactly one element is needed for the B operand."
"We can geneneralize later.");
D = C;
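    // Each (m, n) tile of D = A * B + C is computed with four real-valued MMAs
    // against the planar accumulator:
    //   D.real += A.real * B.real
    //   D.imag += A.real * B.imag
    //   D.real += -A.imag * B.imag
    //   D.imag += A.imag * B.real
    // with the sign of the imaginary operand flipped when kTransformA or
    // kTransformB requests conjugation.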
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = A[m].real();
operand_B[0] = B[n].real();
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = A[m].real();
operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag());
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.real(), -a.imag(), b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
// A imaginary part is intentionally negated
operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? A[m].imag() : -A[m].imag());
operand_B[0] = (kTransformB == ComplexTransform::kConjugate ? -B[n].imag() : B[n].imag());
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
operand_A[0] = (kTransformA == ComplexTransform::kConjugate ? -A[m].imag() : A[m].imag());
operand_B[0] = B[n].real();
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//TODO: Implement this
dst_A = A;
dst_B = B;
}
};
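// A minimal usage sketch of this specialization (fragment names and the
// surrounding mainloop are assumptions for illustration; transform() is an
// identity pass-through here):
//
//   using WarpMma = MmaComplexTensorOp<Shape, complex<double>, LayoutA,
//                                      complex<double>, LayoutB,
//                                      complex<double>, LayoutC, Policy>;
//   WarpMma warp_mma;
//   WarpMma::FragmentC accum;
//   accum.clear();
//   warp_mma(accum, frag_A, frag_B, accum);   // accumulate A*B on top of accum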
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<float>
// Rounding: float -> tfloat32_t (FloatRoundStyle::round_half_ulp_trunc_dntz)
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<float>,
LayoutA_,
complex<float>,
LayoutB_,
complex<float>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of members of complex multiplicand A
using RealElementA = float;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of members of complex multiplicand B
using RealElementB = float;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of members of complex accumulator matrix C
using RealElementC = float;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = typename arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2>;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
  /// Number of complex product operations performed (one complex product needs four mma instructions)
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(platform::is_same<cutlass::gemm::GemmShape<16, 8, 8>, typename ArchMmaOperator::Shape>::value,
"This implementation only supports mma.m16n8k8 math instructions.");
static_assert(InstMmaOperandA::kElements == 4,
"This implementation only supports math instructions in which exactly four element is needed for the A operand."
"We can geneneralize later.");
static_assert(InstMmaOperandB::kElements == 2,
"This implementation only supports math instructions in which exactly two element is needed for the B operand."
"We can geneneralize later.");
// Instruction Operands A & B holding real part followed by imaginary part for mma operations
InstMmaOperandA const *operand_A = reinterpret_cast<InstMmaOperandA const *>(&A);
InstMmaOperandB const *operand_B = reinterpret_cast<InstMmaOperandB const *>(&B);
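    // Indexing convention established by transform(): operand_A[0 .. kRow-1]
    // hold real parts and operand_A[kRow .. 2*kRow-1] hold imaginary parts;
    // operand_B is split the same way along MmaIterations::kColumn.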
//
// Accumulate in place
//
D = C;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m], operand_B[n], *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m], operand_B[n+MmaIterations::kColumn], *accum);
}
// mma(accum.real(), a.imag(), -b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// negate OperandB to accumulate -(a.imag()*b.imag())
        // negating OperandB emits fewer instructions than negating OperandA, as OperandB has fewer elements
negate<InstMmaOperandB> negate_op;
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
// Alias types for underlying real-valued matrix multiply operator
using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
//
// Define conversions from source type to instruction operands' type
//
FloatRoundStyle const kRoundA = FloatRoundStyle::round_half_ulp_trunc_dntz;
FloatRoundStyle const kRoundB = FloatRoundStyle::round_half_ulp_trunc_dntz;
detail::UnpackComplexConvertAndPackForMma <
RealElementA,
InstMmaOperandA,
FragmentA,
MmaIterations,
MatrixShape<2, 2>,
kTransformA,
Operand::kA,
kRoundA> convert_A;
detail::UnpackComplexConvertAndPackForMma <
RealElementB,
InstMmaOperandB,
FragmentB,
MmaIterations,
MatrixShape<2, 1>,
kTransformB,
Operand::kB,
kRoundB> convert_B;
// Convert Fragment[A|B] holding complex<RealElement[A|B]> to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element
convert_A(reinterpret_cast<InstMmaOperandA *>(&dst_A), A);
convert_B(reinterpret_cast<InstMmaOperandB *>(&dst_B), B);
}
};
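// A minimal calling-order sketch for this specialization (names are
// illustrative; the warp-level pipeline normally performs these two steps
// per k-group):
//
//   typename WarpMma::TransformedFragmentA tA;
//   typename WarpMma::TransformedFragmentB tB;
//   warp_mma.transform(tA, tB, frag_A, frag_B);  // float -> tfloat32_t, planar pack
//   warp_mma(accum, tA, tB, accum);              // four mma.m16n8k8 per complex tile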
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<double>
// Math instruction: mma.sync.aligned.m16n8k4.f64.f64.f64.f64
// Output data type: complex<double>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaComplexTensorOp<
Shape_,
complex<double>,
LayoutA_,
complex<double>,
LayoutB_,
complex<double>,
LayoutC_,
Policy_,
TransformA,
TransformB,
true> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of members of complex multiplicand A
using RealElementA = double;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of members of complex multiplicand B
using RealElementB = double;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of members of complex accumulator matrix C
using RealElementC = double;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicyTensorOp)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = typename arch::OpMultiplyAddComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 2 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected planar complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
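    // Unlike the single-element case above, the f64 MMA fragments carry several
    // elements per operand, so each real/imaginary part is gathered with an
    // inner loop over MmaOperand{A,B}::kElements before issuing the mma.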
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = B[n*MmaOperandB::kElements + nk].real();
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ?
-B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag());
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.real(), -a.imag(), b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
// A imaginary part is intentionally negated
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ?
A[m*MmaOperandA::kElements + mk].imag() : -A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = (kTransformB == ComplexTransform::kConjugate ?
-B[n*MmaOperandB::kElements + nk].imag() : B[n*MmaOperandB::kElements + nk].imag());
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A, operand_B, *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_A;
MmaOperandB operand_B;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_A[mk] = (kTransformA == ComplexTransform::kConjugate ?
-A[m*MmaOperandA::kElements + mk].imag() : A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_B[nk] = B[n*MmaOperandB::kElements + nk].real();
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A, operand_B, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// TODO - partial specializations of real*complex and complex*real
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 37,705 | C | 31.282534 | 122 | 0.658109 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/tile_iterator_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/array_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename TileIterator_>
class TileIteratorPlanarComplex {
public:
/// Underlying iterator over real-valued tiles
using TileIterator = TileIterator_;
/// Underlying element type
using Element = typename TileIterator::Element;
/// Underlying layout type
using Layout = typename TileIterator::Layout;
/// TensorRef type for loading element from a tensor
using TensorRef = typename TileIterator::TensorRef;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Planar complex fragment
using Fragment = ArrayPlanarComplex<Element, TileIterator::Fragment::kElements>;
public:
/// Underlying tile iterator
TileIterator tile_iterator_;
/// Offset (in units of bytes) to the imaginary part of the planar complex matrix
LongIndex imaginary_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
TileIteratorPlanarComplex(): imaginary_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
TileIteratorPlanarComplex(
TensorRef const &ref,
int lane_id,
LongIndex imaginary_offset
):
tile_iterator_(ref, lane_id),
imaginary_offset_((imaginary_offset * sizeof_bits<Element>::value) / 8) { }
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
TileIteratorPlanarComplex &add_pointer_offset(LongIndex offset) {
tile_iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
TileIteratorPlanarComplex &add_tile_offset(TensorCoord const &tile_offset) {
tile_iterator_.add_tile_offset(tile_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
TileIteratorPlanarComplex & operator++() {
++tile_iterator_;
return *this;
}
//
// WIP
//
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
TileIteratorPlanarComplex & operator--() {
--tile_iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
TileIteratorPlanarComplex & operator+=(TensorCoord const &tile_offset) {
tile_iterator_.add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
TileIteratorPlanarComplex & operator-=(TensorCoord const &tile_offset) {
tile_iterator_.add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
tile_iterator_.load_with_byte_offset(frag.real, 0);
tile_iterator_.load_with_byte_offset(frag.imag, imaginary_offset_);
}
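  // This and the offset-taking overloads below read the real plane at the
  // iterator's current position and the imaginary plane imaginary_offset_
  // bytes further, matching the real/imag split of ArrayPlanarComplex.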
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
tile_iterator_.load_with_byte_offset(frag.real, byte_offset);
tile_iterator_.load_with_byte_offset(frag.imag, byte_offset + imaginary_offset_);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
Index byte_offset = (pointer_offset * sizeof_bits<Element>::value)/8;
tile_iterator_.load_with_byte_offset(frag.real, byte_offset);
tile_iterator_.load_with_byte_offset(frag.imag, byte_offset + imaginary_offset_);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
tile_iterator_.load_with_byte_offset(frag.real, tile_offset, 0);
tile_iterator_.load_with_byte_offset(frag.imag, tile_offset, imaginary_offset_);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
Index byte_offset = (pointer_offset * sizeof_bits<Element>::value)/8;
tile_iterator_.load_with_byte_offset(frag.real, tile_offset, byte_offset);
    tile_iterator_.load_with_byte_offset(frag.imag, tile_offset, byte_offset + imaginary_offset_);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
tile_iterator_.load_with_byte_offset(frag.real, tile_offset, byte_offset);
tile_iterator_.load_with_byte_offset(frag.imag, tile_offset, byte_offset + imaginary_offset_);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
tile_iterator_.set_kgroup_index(k_group);
}
};
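// A minimal construction sketch (the tensor ref and the element distance
// between the real and imaginary planes are assumptions for illustration):
//
//   TileIteratorPlanarComplex<WarpTileIterator> iter(
//       ref_real,           // TensorRef to the real-valued plane
//       lane_id,
//       imaginary_stride);  // offset to the imaginary plane, in elements
//   iter.load(frag);        // fills frag.real and frag.imag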
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,728 | C | 33.776892 | 100 | 0.672892 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default warp-level GEMM operators selected by data type, size, and layouts of operands.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_fast_f32.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses BF16 internally
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
GemmShape<16, 8, 8>,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAddFastBF16,
PartitionsK, AccumulatorsInRowMajor> {
// Uses BF16 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
GemmShape<16, 8, 8>,
32,
bfloat16_t, cutlass::layout::RowMajor,
bfloat16_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOp<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
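// The same selection pattern repeats below for the other math-operator tags:
// float inputs are converted to a narrower type (bfloat16_t here, half_t and
// tfloat32_t in the following specializations) so a real-valued Tensor Core
// instruction can be used, trading some precision for throughput.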
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses F16 internally
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
GemmShape<16, 8, 8>,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAddFastF16,
PartitionsK, AccumulatorsInRowMajor> {
// Uses F16 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
GemmShape<16, 8, 8>,
32,
half_t, cutlass::layout::RowMajor,
half_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOp<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses TF32 internally
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Shape of target matrix multiply instruction (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
InstructionShape_,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAdd, PartitionsK, AccumulatorsInRowMajor> {
// Uses TF32 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
tfloat32_t, cutlass::layout::RowMajor,
tfloat32_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOp<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses TF32 for Fast Accurate FP32
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Shape of target matrix multiply instruction (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultMmaTensorOp<
WarpShape_,
InstructionShape_,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAddFastF32, PartitionsK, AccumulatorsInRowMajor> {
// Uses TF32 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
cutlass::tfloat32_t, cutlass::layout::RowMajor,
cutlass::tfloat32_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaTensorOpFastF32<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
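// A minimal selection sketch (warp/instruction shapes and layouts are
// placeholders; real kernels pass the shared-memory TensorOp layouts chosen at
// the threadblock level):
//
//   using WarpMma = typename cutlass::gemm::warp::DefaultMmaTensorOp<
//       cutlass::gemm::GemmShape<64, 64, 16>,   // warp tile
//       cutlass::gemm::GemmShape<16, 8, 8>,     // instruction shape
//       float, LayoutA,                         // A element / smem layout
//       float, LayoutB,                         // B element / smem layout
//       float, cutlass::layout::RowMajor,       // C element / layout
//       cutlass::arch::OpMultiplyAddFastF32>::Type;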
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,026 | C | 36.769874 | 100 | 0.626967 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/// Tile access iterator
/// Each iteration's access into the tile is
/// used as a multiplicand for one
/// warp-level matrix multiplication
template <
/// Size of the tile (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix production operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Enable Residual Support
bool EnableResidual = false,
/// Number of partitions along K dimension
int PartitionsK_ = 1
>
class MmaTensorOpMultiplicandTileAccessIterator {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess =
(sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);
using InstructionCount = MatrixShape<
Shape::kRow / InstructionShape::kRow,
Shape::kColumn / InstructionShape::kColumn
>;
static int const kIterations = (kOperand == Operand::kA) ?
InstructionCount::kColumn : InstructionCount::kRow;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA) ?
(Shape::kRow * InstructionShape::kColumn / kThreads) :
(Shape::kColumn * InstructionShape::kRow / kThreads)
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to load residual tile
bool is_residual_;
/// residual offset of each thread
TensorCoord residual_offset_;
/// Iterations in a tile
int iterations_;
public:
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), is_residual_(false), iterations_(0) {
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
if(EnableResidual) {
// compute residual offset
if (kOperand == Operand::kA) {
typename TensorCoord::Index residual_size =
extent_.column() % Shape::kColumn;
if(residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(0, residual_size);
}
}
else {
typename TensorCoord::Index residual_size =
extent_.row() % Shape::kRow;
if(residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(residual_size, 0);
}
}
}
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator(
TensorRef const &ref,
int lane_id
): MmaTensorOpMultiplicandTileAccessIterator(ref,
{Shape::kRow, Shape::kColumn}, lane_id) {
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
void advance() {
if(EnableResidual && is_residual_) {
is_residual_ = false;
origin_ += residual_offset_;
ref_.add_coord_offset(residual_offset_);
}
else {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
}
iterations_ = 0;
}
/// increase iterations in a tile
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileAccessIterator & operator++() {
iterations_++;
if(iterations_ >= kIterations)
advance();
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
int const kWarpShapeDivisibleInner =
(kOperand == Operand::kA ? InstructionShape::kColumn : InstructionShape::kRow);
// Take advantage of Tensor Op's 8 x 4T access pattern
int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
AccessType *access_ptr = reinterpret_cast<AccessType *>(&frag);
if (kOperand == Operand::kA) {
int const kTilesPerInstruction = InstructionShape::kRow / 8;
CUTLASS_PRAGMA_UNROLL
for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) {
int access_idx =
access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);
MatrixCoord offset(
access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kColumn);
MatrixCoord access_coord = origin_ + offset;
// if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
// }
// else {
// AccessType zero;
// zero.clear();
// access_ptr[access_idx] = zero;
// }
}
}
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
int access_idx = inner_idx + kAccessesInner * inst_n_idx;
MatrixCoord offset(
inner_idx * 4 * kElementsPerAccess + iterations_ * InstructionShape::kRow,
inst_n_idx * 8);
MatrixCoord access_coord = origin_ + offset;
// if(access_coord.row() < extent_.row() && access_coord.column() < extent_.column()) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
// }
// else {
// AccessType zero;
// zero.clear();
// access_ptr[access_idx] = zero;
// }
}
}
}
}
};
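// A minimal usage sketch (the shape, element type, layout, and tensor ref are
// assumptions for illustration):
//
//   using IteratorA = MmaTensorOpMultiplicandTileAccessIterator<
//       MatrixShape<64, 16>, Operand::kA, half_t, layout::RowMajor,
//       MatrixShape<16, 8>, 1>;
//   IteratorA iter_A(ref_A, extent_A, lane_id);
//   typename IteratorA::Fragment frag_A;
//   iter_A.load(frag_A);   // one instruction-k slice of the warp tile
//   ++iter_A;              // next slice; advances the tile when exhausted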
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 11,017 | C | 29.352617 | 103 | 0.630299 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 4), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 64, "This is specialized for 64b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<8, 4>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess / Delta::kContiguous,
InstructionShape::kStrided / Delta::kStrided
>;
};
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
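// Each thread's fragment holds its 1/32 share of a Shape::kContiguous x
// InstructionShape::kStrided tile.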
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0), k_group_idx_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
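// Map the warp's 32 lanes onto an 8 (contiguous) x 4 (strided) arrangement; the
// contiguous index is XOR-swizzled with the strided index to follow the permuted
// TensorOpMultiplicandCongruous64b shared-memory layout.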
int access_strided = lane_id / Policy::Delta::kContiguous;
int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided;
pointer_ = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset =
(tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess +
tile_offset.contiguous() * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
add_tile_offset({0, -1});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c +
Policy::Delta::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
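// Illustrative usage sketch (not part of the library): a warp-level mainloop would
// typically construct this iterator over a shared-memory tile and step it along the
// k dimension, e.g.
//
//   Iterator iter(smem_ref, lane_id);   // Iterator = this specialization
//   typename Iterator::Fragment frag;
//   iter.load(frag);                    // load the current k-group
//   ++iter;                             // advance to the next k-group
//
// The concrete template arguments are assumed to come from the enclosing warp-level Mma.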
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator adapts row-major tiles to the TensorOpMultiplicandCongruous64b
/// iterator defined above.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCongruous64b,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator adapts column-major tiles to the TensorOpMultiplicandCongruous64b
/// iterator defined above and is intended to be constructed with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCongruous64b,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 64, "This is specialized for 64b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<4, 16>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
InstructionShape::kContiguous / Delta::kContiguous,
Shape::kStrided / Delta::kStrided
>;
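// Unlike the congruous case above, each load here covers InstructionShape::kContiguous
// along the contiguous dimension and the full Shape::kStrided along the strided dimension.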
};
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter for tracking K-group
Index k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0), k_group_idx_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
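// Lanes are arranged as 8 (contiguous) x 4 (strided); the crosswise swizzle is not
// applied here but by toggling byte_offset_ as the iterator advances through k-groups.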
int access_strided = lane_id / 8;
int access_contiguous = (lane_id % 8);
byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType);
pointer_ = reinterpret_cast<AccessType const *>(ref.data());
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset / kElementsPerAccess;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) *
stride_ * kElementsPerAccess +
tile_offset.strided() * Shape::kStrided;
add_pointer_offset(offset);
int old_k_group_idx = k_group_idx_;
k_group_idx_ += tile_offset.contiguous();
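// Toggle the 64-byte swizzle bit whenever the k-group index crosses from one pair of
// k-groups into the next (i.e. bit 1 of the index flips).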
if ((k_group_idx_ & 2) ^ (old_k_group_idx & 2)) {
byte_offset_ ^= 0x40;
}
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset); // TODO: fix this if it becomes an issue during warp iterator reset
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kContiguous;
if (k_group_idx_ & 0x1) {
// toggle the 64-byte swizzle bit
byte_offset_ ^= 0x40;
}
++k_group_idx_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c * stride_ +
Policy::Delta::kStrided * s / kElementsPerAccess;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
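// For odd k-groups the two 64b halves of each 128b access are loaded in swapped order,
// so exchange them to restore the fragment's logical element order.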
Element *exchange_ptr = reinterpret_cast<Element *>(&frag);
if (k_group_idx_ & 1) {
// exchange on 64b granularity
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Fragment::kElements; i += 2) {
Element tmp = exchange_ptr[i];
exchange_ptr[i] = exchange_ptr[i + 1];
exchange_ptr[i + 1] = tmp;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group;
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator adapts row-major tiles to the TensorOpMultiplicand64bCrosswise
/// iterator defined above.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicand64bCrosswise,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator adapts column-major tiles to the TensorOpMultiplicand64bCrosswise
/// iterator defined above.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicand64bCrosswise,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for canonical matrix layouts
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand_,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix production operation (concept: MatrixShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads = 32,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaTensorOpMultiplicandTileIteratorCanonical {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
/// Basic check
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess =
(sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);
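// Elements that are 32b or wider are accessed one at a time; narrower elements are
// packed so that each access covers 32 bits.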
private:
static int const kWarpShapeOuter =
(kOperand == Operand::kA ? Shape::kRow : Shape::kColumn);
static int const kWarpShapeInner =
(kOperand == Operand::kA ? Shape::kColumn : Shape::kRow);
/// Rounded up instruction counts
using InstructionCount = MatrixShape<
Shape::kRow / InstructionShape::kRow,
Shape::kColumn / InstructionShape::kColumn
>;
/// Rounded up tile dimensions
using WarpShapeDivisible = MatrixShape<
InstructionCount::kRow * InstructionShape::kRow,
InstructionCount::kColumn * InstructionShape::kColumn
>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
WarpShapeDivisible::kRow * WarpShapeDivisible::kColumn / kThreads
>;
/// Memory access type
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Underlying tensor reference
TensorRef ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(): divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(
TensorRef const &ref,
int lane_id
): ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) {
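// Canonical Tensor Op lane mapping: for operand A, lane i begins at row (i / 4) and
// column (i % 4) * kElementsPerAccess; the mapping is transposed for operand B.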
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical(
TensorRef const &ref,
TensorCoord extent,
int lane_id
): ref_(ref), extent_(extent), divisible_(false) {
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id / 4, (lane_id % 4) * kElementsPerAccess);
}
else {
origin_ = MatrixCoord((lane_id % 4) * kElementsPerAccess, lane_id / 4);
}
ref_.add_coord_offset(origin_);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical &add_tile_offset(TensorCoord const &tile_offset) {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator++() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
}
else {
add_tile_offset({1, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator--() {
if (kOperand == Operand::kA) {
add_tile_offset({0, -1});
}
else {
add_tile_offset({-1, 0});
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIteratorCanonical & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
int const kWarpShapeDivisibleInner =
(kOperand == Operand::kA ? WarpShapeDivisible::kColumn : WarpShapeDivisible::kRow);
// Take advantage of Tensor Op's 8 x 4T access pattern
int const kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
AccessType *access_ptr = reinterpret_cast<AccessType *>(&frag);
if (kOperand == Operand::kA) {
int const kTilesPerInstruction = InstructionShape::kRow / 8;
CUTLASS_PRAGMA_UNROLL
for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; ++access_m_idx) {
int access_idx =
access_m_idx + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);
MatrixCoord offset(
access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
inner_idx * 4 * kElementsPerAccess);
MatrixCoord access_coord = origin_ + offset;
if (divisible_ ||
(access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
}
else {
AccessType zero;
zero.clear();
access_ptr[access_idx] = zero;
}
}
}
}
}
else {
CUTLASS_PRAGMA_UNROLL
for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
int access_idx = inner_idx + kAccessesInner * inst_n_idx;
MatrixCoord offset(
inner_idx * 4 * kElementsPerAccess,
inst_n_idx * 8);
MatrixCoord access_coord = origin_ + offset;
if (divisible_ ||
(access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) {
access_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
ref_.data() + ref_.offset(offset));
}
else {
AccessType zero;
zero.clear();
access_ptr[access_idx] = zero;
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation
}
};
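// Illustrative sketch (not part of the library): the canonical iterator may also be
// constructed with an explicit extent so that partial tiles are handled, e.g.
//
//   CanonicalIterator iter(ref, extent, lane_id);   // CanonicalIterator = this class
//   typename CanonicalIterator::Fragment frag;
//   iter.load(frag);   // accesses outside `extent` are replaced with zeros
//
// Template arguments are assumed to match the enclosing warp-level Mma configuration.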
/// Wrapper for ColumnMajor
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIteratorCanonical<
Shape, kOperand, Element,
layout::ColumnMajor,
InstructionShape,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
TensorCoord const & extent,
int lane_id
): iterator_({ref.data(), ref.stride()}, extent, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
    load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
    load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
      {tile_offset.row(), tile_offset.column()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
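// Illustrative usage sketch (added for exposition; the shapes and element type below are
// assumed placeholders, not taken from this header): the wrapper above lets warp-level code
// read multiplicand tiles straight from a canonical column-major TensorRef, forwarding every
// call to the underlying MmaTensorOpMultiplicandTileIteratorCanonical.
//
//   using WarpIterator = MmaTensorOpMultiplicandTileIterator<
//       MatrixShape<64, 16>,        // tile visited by the warp (example value)
//       Operand::kA, half_t,
//       layout::ColumnMajor,
//       MatrixShape<16, 8>,         // instruction shape (concept: MatrixShape, example value)
//       1,                          // OpDelta
//       32,                         // threads per warp
//       1>;                         // PartitionsK
//
//   WarpIterator iter(ref_A, lane_id);   // ref_A: TensorRef<half_t, layout::ColumnMajor>
//   WarpIterator::Fragment frag;
//   iter.load(frag);                     // load this thread's part of the tile
//   ++iter;                              // advance along the k dimension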
/// Wrapper for RowMajor
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIteratorCanonical<
Shape, kOperand, Element,
layout::RowMajor,
InstructionShape,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
TensorCoord const &extent,
int lane_id
): iterator_({ref.data(), ref.stride()}, extent, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
  /// Advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
  /// Advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
    load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
    load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
      {tile_offset.row(), tile_offset.column()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 75,179 | C | 29.648186 | 110 | 0.681281 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_simt_policy.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Describes the lane policy used by warp-level matrix multiply operators targeting SIMT
instructions
*/
#pragma once
#include "cutlass/cutlass.h"
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Describes the arrangement and configuration of per-lane operations in warp-level matrix multiply
template <
typename WarpShape_, ///< shape of the warp in lanes (concept: MatrixShape)
typename LaneLayout_, ///< layout function of lanes
typename LaneMmaShape_ ///< size of each lane's thread-level matrix product (concept: GemmShape)
>
struct MmaSimtPolicy {
using WarpShape = WarpShape_;
using LaneLayout = LaneLayout_;
using LaneMmaShape = LaneMmaShape_;
using MmaShape = LaneMmaShape;
/// Returns a layout functor mapping lane position in the warp to thread ID
CUTLASS_HOST_DEVICE
static LaneLayout get_lane_layout() {
return LaneLayout::packed({WarpShape::kRow, WarpShape::kColumn});
}
};
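// Illustrative sketch (added for exposition; parameter values are assumptions, not taken from
// this header): a 32-thread warp arranged as 4x8 lanes, each lane computing a 4x4x1 product per
// inner step. MatrixShape, RowMajorInterleaved, and GemmShape are defined in other CUTLASS
// headers, so the example is shown only in comments here.
//
//   using ExamplePolicy = cutlass::gemm::warp::MmaSimtPolicy<
//       cutlass::MatrixShape<4, 8>,                // WarpShape: 4x8 lanes = 32 threads
//       cutlass::layout::RowMajorInterleaved<2>,   // LaneLayout: maps lane position to thread id
//       cutlass::gemm::GemmShape<4, 4, 1>>;        // LaneMmaShape: per-lane 4x4x1 product
//
//   // get_lane_layout().inverse(lane_id) yields a lane's (row, column) position, which the SIMT
//   // tile iterators scale by LaneMmaShape to locate that lane's starting element.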
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
| 3,079 | C | 42.999999 | 109 | 0.653134 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_simt_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines iterators over operands to warp-level matrix multiply operations targeting
      SIMT instructions
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over operands to warp-level matrix multiply operations targeting SIMT instructions
///
/// concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand,
  /// Data type of elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK = 1,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize = 1
>
class MmaSimtTileIterator;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for A operands of column-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize
>
class MmaSimtTileIterator<Shape_, Operand::kA, Element_, layout::ColumnMajor, Policy_, PartitionsK, PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::ColumnMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
/// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
Shape::kRow / Policy::WarpShape::kRow,
Shape::kColumn
>;
static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow / Policy::LaneMmaShape::kM,
ThreadShape::kColumn
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
private:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kM>, layout::ColumnMajor> ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef ref,
int lane_id
) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(Policy::LaneMmaShape::kM, 0);
ref.add_coord_offset(lane_offset);
ref_.reset(
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kM);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
ref_.add_coord_offset({
coord.row() * Shape::kRow / Policy::LaneMmaShape::kM,
coord.column() * Shape::kColumn});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator++() {
ref_.add_coord_offset({0, Shape::kColumn});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator--() {
ref_.add_coord_offset({0, -Shape::kColumn});
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kM> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kColumn; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
// This logic has been replaced with calls to inline PTX to guarantee vectorization.
#if 0
dst_ptr[m + k * Iterations::kRow] =
*(ref_.data() + ref_.offset({m * Policy::WarpShape::kRow, k}) + pointer_offset / Policy::LaneMmaShape::kM);
#endif
auto ptr = ref_.data() + ref_.offset({m * Policy::WarpShape::kRow, k}) + pointer_offset / Policy::LaneMmaShape::kM;
arch::shared_load(dst_ptr[m + k * Iterations::kRow], ptr);
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
    Array<Element, Policy::LaneMmaShape::kM> const *src_ptr =
      reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> const *>(&frag);
    CUTLASS_PRAGMA_UNROLL
    for (int k = 0; k < Iterations::kColumn; ++k) {
      CUTLASS_PRAGMA_UNROLL
      for (int m = 0; m < Iterations::kRow; ++m) {
        // Mirrors load_with_pointer_offset(): write each LaneMmaShape::kM-element vector of the
        // fragment back to the part of the column-major tile owned by this thread.
        *(ref_.data() + ref_.offset({m * Policy::WarpShape::kRow, k}) + pointer_offset / Policy::LaneMmaShape::kM) =
          src_ptr[m + k * Iterations::kRow];
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
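// Illustrative compile-time sketch (added for exposition; the shapes below are assumed example
// values, not taken from any particular CUTLASS kernel): a 4x8 lane arrangement, each lane
// computing a 4x4x1 product, iterating over a 64x8 column-major A tile.
namespace example_simt_iterator_a_column_major {
using ExamplePolicy = MmaSimtPolicy<
    MatrixShape<4, 8>,                // WarpShape: 4x8 lanes = 32 threads
    layout::RowMajorInterleaved<2>,   // LaneLayout
    GemmShape<4, 4, 1>>;              // LaneMmaShape
using ExampleIterator = MmaSimtTileIterator<
    MatrixShape<64, 8>, Operand::kA, float, layout::ColumnMajor, ExamplePolicy>;
// Each thread owns ThreadShape = <64 / 4, 8> = <16, 8> elements and issues
// Iterations = <16 / 4, 8> = <4, 8> vector loads of LaneMmaShape::kM elements each.
static_assert(ExampleIterator::ThreadShape::kRow == 16, "example: 64 rows / 4 lanes per column");
static_assert(ExampleIterator::Iterations::kRow == 4, "example: 16 rows / 4 elements per lane");
static_assert(ExampleIterator::ThreadShape::kCount == 128, "example: fragment elements per thread");
} // namespace example_simt_iterator_a_column_major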
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for A operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize
>
class MmaSimtTileIterator<Shape_, Operand::kA, Element_, layout::RowMajor, Policy_, PartitionsK, PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
/// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
Shape::kRow / Policy::WarpShape::kRow,
Shape::kColumn
>;
static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads (scalar loads)
using Iterations = MatrixShape<
ThreadShape::kRow / Policy::LaneMmaShape::kM,
ThreadShape::kColumn
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
private:
/// Internal reference
cutlass::TensorRef<Element, layout::RowMajor> ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaSimtTileIterator() : divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef ref,
int lane_id
) : extent_(Shape::kRow, Shape::kColumn), divisible_ (true) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(Policy::LaneMmaShape::kM, 0);
origin_ = lane_offset;
ref.add_coord_offset(lane_offset);
ref_.reset(ref.data(), ref.stride(0));
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef ref,
TensorCoord extent,
int lane_id
) : extent_(extent), divisible_ (false) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(Policy::LaneMmaShape::kM, 0);
origin_ = lane_offset;
ref.add_coord_offset(lane_offset);
ref_.reset(ref.data(), ref.stride(0));
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
TensorCoord coord_offset(
coord.row() * Shape::kRow,
coord.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator++() {
ref_.add_coord_offset({0, Shape::kColumn});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator--() {
ref_.add_coord_offset({0, -Shape::kColumn});
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (scalar loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kColumn; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Policy::LaneMmaShape::kM; i++) {
MatrixCoord offset(m * Policy::WarpShape::kRow * Policy::LaneMmaShape::kM + i, k);
MatrixCoord access_coord = origin_ + offset;
int frag_idx = m * Policy::LaneMmaShape::kM + i + k * Iterations::kRow;
if (divisible_ ||
(access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) {
frag[frag_idx] = *(ref_.data() + ref_.offset(offset) + pointer_offset);
}
else {
frag[frag_idx] = Element();
}
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kColumn; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Policy::LaneMmaShape::kM; i++) {
          // Mirrors load_with_pointer_offset(): scalar stores into the row-major tile owned by this thread.
          *(ref_.data() + ref_.offset({m * Policy::WarpShape::kRow * Policy::LaneMmaShape::kM + i, k}) + pointer_offset) =
            frag[m * Policy::LaneMmaShape::kM + i + k * Iterations::kRow];
}
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
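// Illustrative sketch (added for exposition; names and shapes are assumed placeholders): the
// row-major A specialization above performs scalar loads and, when constructed with an extent,
// predicates each access so partial (residue) tiles read zero-initialized elements instead of
// out-of-bounds memory.
//
//   // full tile: no bounds checking (divisible_ == true)
//   IteratorA iter_full(ref_A, lane_id);
//
//   // residue tile: pass the valid extent so accesses beyond it yield Element()
//   IteratorA iter_residue(ref_A, MatrixCoord(rows_remaining, cols_remaining), lane_id);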
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for B operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
  /// Data type of B elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize
>
class MmaSimtTileIterator<Shape_, Operand::kB, Element_, layout::RowMajor, Policy_, PartitionsK, PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kColumn % Policy::WarpShape::kColumn),
"The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero.");
static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero.");
/// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
Shape::kRow,
Shape::kColumn / Policy::WarpShape::kColumn
>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow,
ThreadShape::kColumn / Policy::LaneMmaShape::kN
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
protected:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef ref,
int lane_id
) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(0, Policy::LaneMmaShape::kN);
ref.add_coord_offset(lane_offset);
ref_.reset(
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kN);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
ref_.add_coord_offset({
coord.row() * Shape::kRow,
coord.column() * Shape::kColumn / Policy::LaneMmaShape::kN});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator++() {
ref_.add_coord_offset({Shape::kRow, 0});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator--() {
ref_.add_coord_offset({-Shape::kRow, 0});
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
#if 0
dst_ptr[n + k * Iterations::kColumn] =
*(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN);
#endif
void const *ptr = ref_.data() + ref_.offset({k, n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN;
arch::shared_load(dst_ptr[n + k * Iterations::kColumn], ptr);
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
    Array<Element, Policy::LaneMmaShape::kN> const *src_ptr =
      reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> const *>(&frag);
    CUTLASS_PRAGMA_UNROLL
    for (int k = 0; k < Iterations::kRow; ++k) {
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < Iterations::kColumn; ++n) {
        // Mirrors load_with_pointer_offset(): write each LaneMmaShape::kN-element vector of the
        // fragment back to the part of the row-major tile owned by this thread.
        *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN) =
          src_ptr[n + k * Iterations::kColumn];
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, Index pointer_offset) const {
    store_with_pointer_offset(frag, pointer_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
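// Illustrative compile-time sketch (added for exposition; example values only): the same 4x8
// lane arrangement iterating over an 8x128 row-major B tile with vectorized loads along N.
namespace example_simt_iterator_b_row_major {
using ExamplePolicy = MmaSimtPolicy<
    MatrixShape<4, 8>, layout::RowMajorInterleaved<2>, GemmShape<4, 4, 1>>;
using ExampleIterator = MmaSimtTileIterator<
    MatrixShape<8, 128>, Operand::kB, float, layout::RowMajor, ExamplePolicy>;
// ThreadShape = <8, 128 / 8> = <8, 16>; Iterations = <8, 16 / 4> = <8, 4> vector loads of
// LaneMmaShape::kN elements each.
static_assert(ExampleIterator::ThreadShape::kColumn == 16, "example: 128 columns / 8 lanes per row");
static_assert(ExampleIterator::Iterations::kColumn == 4, "example: 16 columns / 4 elements per lane");
} // namespace example_simt_iterator_b_row_major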
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for B operands of column-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
  /// Data type of B elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize
>
class MmaSimtTileIterator<Shape_, Operand::kB, Element_, layout::ColumnMajor, Policy_, PartitionsK, PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::ColumnMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kColumn % Policy::WarpShape::kColumn),
"The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero.");
static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero.");
/// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
Shape::kRow,
Shape::kColumn / Policy::WarpShape::kColumn
>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow,
ThreadShape::kColumn / Policy::LaneMmaShape::kN
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
private:
/// Internal reference
cutlass::TensorRef<Element, layout::ColumnMajor> ref_;
/// Extent of tensor
MatrixCoord extent_;
/// Origin
MatrixCoord origin_;
/// Used to conditionally enable extents checking
bool divisible_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(): divisible_(true) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef ref,
int lane_id
): extent_(Shape::kRow, Shape::kColumn), divisible_(true) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(0, Policy::LaneMmaShape::kN);
origin_ = lane_offset;
ref.add_coord_offset(lane_offset);
ref_.reset(ref.data(), ref.stride(0));
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef ref,
TensorCoord extent,
int lane_id
): extent_(extent), divisible_(false) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(0, Policy::LaneMmaShape::kN);
origin_ = lane_offset;
ref.add_coord_offset(lane_offset);
ref_.reset(ref.data(), ref.stride(0));
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
TensorCoord coord_offset(
coord.row() * Shape::kRow,
coord.column() * Shape::kColumn);
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator++() {
ref_.add_coord_offset({Shape::kRow, 0});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator--() {
ref_.add_coord_offset({-Shape::kRow, 0});
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (scalar loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Policy::LaneMmaShape::kN; ++i) {
MatrixCoord offset(k, n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN + i);
MatrixCoord access_coord = origin_ + offset;
int frag_idx = n * Policy::LaneMmaShape::kN + i + k * Iterations::kColumn;
if (divisible_ ||
(access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) {
frag[frag_idx] = *(ref_.data() + ref_.offset(offset) + pointer_offset);
}
else {
frag[frag_idx] = Element();
}
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
    CUTLASS_PRAGMA_UNROLL
    for (int k = 0; k < Iterations::kRow; ++k) {
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < Iterations::kColumn; ++n) {
        CUTLASS_PRAGMA_UNROLL
        for (int i = 0; i < Policy::LaneMmaShape::kN; ++i) {
          // Mirrors load_with_pointer_offset(): scalar stores into the column-major tile owned by this thread.
          MatrixCoord offset(k, n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN + i);
          *(ref_.data() + ref_.offset(offset) + pointer_offset) =
            frag[n * Policy::LaneMmaShape::kN + i + k * Iterations::kColumn];
        }
      }
    }
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, Index pointer_offset) const {
    store_with_pointer_offset(frag, pointer_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
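// Illustrative sketch (added for exposition; shapes are assumed placeholders): with a
// column-major B operand, consecutive elements along the N dimension are strided in memory, so
// the specialization above issues scalar, optionally bounds-checked accesses rather than the
// vectorized Array<Element, LaneMmaShape::kN> accesses used for row-major B.
//
//   IteratorB iter_B(ref_B, lane_id);                     // full tile, no extent checking
//   IteratorB iter_B_residue(ref_B, extent_B, lane_id);   // partial tile with extent checking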
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for C operands of column-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
  /// Data type of C elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_
>
class MmaSimtTileIterator<Shape_, Operand::kC, Element_, layout::ColumnMajor, Policy_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of accumulators in memory
using Layout = layout::ColumnMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(
(!(Shape::kRow % Policy::WarpShape::kRow)) && (!(Shape::kColumn % Policy::WarpShape::kColumn)),
"Warp-level GEMM shape must be divisible by the arrangement of threads in the warp.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero.");
  /// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
Shape::kRow / Policy::WarpShape::kRow,
Shape::kColumn / Policy::WarpShape::kColumn
>;
static_assert(
(!(ThreadShape::kRow % Policy::LaneMmaShape::kM)) && (!(ThreadShape::kColumn % Policy::LaneMmaShape::kN)),
"Warp-level GEMM shape must be divisible by the arrangement of threads in the warp.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow / Policy::LaneMmaShape::kM,
ThreadShape::kColumn / Policy::LaneMmaShape::kN
>;
using Delta = MatrixShape<
Policy::WarpShape::kRow * Policy::LaneMmaShape::kM,
Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
private:
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
ref_.add_coord_offset({
coord.row() * Shape::kRow,
coord.column() * Shape::kColumn});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator++() {
ref_.add_coord_offset({Shape::kRow, 0});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator--() {
ref_.add_coord_offset({-Shape::kRow, 0});
return *this;
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to be loaded from memory
Index pointer_offset) const { ///< linear offset (in units of Element) when loading
CUTLASS_PRAGMA_UNROLL
    for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
        Array<Element, Policy::LaneMmaShape::kM> const *src_ptr =
          reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> const *>(
            ref_.data() + pointer_offset + ref_.offset({0, mma_n * Delta::kColumn + n}));
        CUTLASS_PRAGMA_UNROLL
        for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
          Array<Element, Policy::LaneMmaShape::kM> *dst_ptr =
            reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>(&frag) +
            mma_m + Iterations::kRow * (n + mma_n * Policy::LaneMmaShape::kN);
          *dst_ptr = src_ptr[mma_m * Policy::WarpShape::kRow];
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
Array<Element, Policy::LaneMmaShape::kM> *dst_ptr=
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>(
ref_.data() + pointer_offset + ref_.offset({0, mma_n * Delta::kColumn + n}));
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
Array<Element, Policy::LaneMmaShape::kM> const *src_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> const *>(&frag) +
mma_m + Iterations::kRow * (n + mma_n * Policy::LaneMmaShape::kN);
dst_ptr[mma_m * Policy::WarpShape::kRow] = *src_ptr;
}
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
};
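// Illustrative usage sketch (added for exposition; names are assumed placeholders): the C
// iterator above reads or writes a warp's accumulator tile in LaneMmaShape::kM-element column
// vectors, e.g. when spilling accumulators to a column-major workspace.
//
//   IteratorC iter_C(ref_C, lane_id);      // ref_C: TensorRef<Element, layout::ColumnMajor>
//   IteratorC::Fragment accum;
//   iter_C.load(accum);                    // read existing C values
//   /* ... accumulate into accum ... */
//   iter_C.store(accum);                   // write the updated tile back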
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for C operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
  /// Data type of C elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_
>
class MmaSimtTileIterator<Shape_, Operand::kC, Element_, layout::RowMajor, Policy_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of accumulators in memory
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(
(!(Shape::kRow % Policy::WarpShape::kRow)) && (!(Shape::kColumn % Policy::WarpShape::kColumn)),
"Warp-level GEMM shape must be divisible by the arrangement of threads in the warp.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero.");
  /// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
Shape::kRow / Policy::WarpShape::kRow,
Shape::kColumn / Policy::WarpShape::kColumn
>;
static_assert(
(!(ThreadShape::kRow % Policy::LaneMmaShape::kM)) && (!(ThreadShape::kColumn % Policy::LaneMmaShape::kN)),
"Warp-level GEMM shape must be divisible by the arrangement of threads in the warp.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow / Policy::LaneMmaShape::kM,
ThreadShape::kColumn / Policy::LaneMmaShape::kN
>;
using Delta = MatrixShape<
Policy::WarpShape::kRow * Policy::LaneMmaShape::kM,
Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
private:
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
ref_.add_coord_offset({
coord.row() * Shape::kRow,
coord.column() * Shape::kColumn});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator++() {
ref_.add_coord_offset({Shape::kRow, 0});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator--() {
ref_.add_coord_offset({-Shape::kRow, 0});
return *this;
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to be loaded from memory
Index pointer_offset) const { ///< linear offset (in units of Element) when loading
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
Array<Element, Policy::LaneMmaShape::kN> const *src_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> const *>(
ref_.data() + pointer_offset + ref_.offset({mma_m * Delta::kRow + m, 0}));
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag) +
mma_n + Iterations::kColumn * (m + mma_m * Policy::LaneMmaShape::kM);
*dst_ptr = src_ptr[mma_n * Policy::WarpShape::kColumn];
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(
ref_.data() + pointer_offset + ref_.offset({mma_m * Delta::kRow + m, 0}));
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
Array<Element, Policy::LaneMmaShape::kN> const *src_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> const *>(&frag) +
mma_n + Iterations::kColumn * (m + mma_m * Policy::LaneMmaShape::kM);
dst_ptr[mma_n * Policy::WarpShape::kColumn] = *src_ptr;
}
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
};
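// Illustrative compile-time sketch (added for exposition; example values only): accumulator
// tile shapes for a 64x128 warp tile with the same 4x8 lane arrangement used above.
namespace example_simt_iterator_c_row_major {
using ExamplePolicy = MmaSimtPolicy<
    MatrixShape<4, 8>, layout::RowMajorInterleaved<2>, GemmShape<4, 4, 1>>;
using ExampleIterator = MmaSimtTileIterator<
    MatrixShape<64, 128>, Operand::kC, float, layout::RowMajor, ExamplePolicy>;
// ThreadShape = <64 / 4, 128 / 8> = <16, 16>; Iterations = <16 / 4, 16 / 4> = <4, 4>;
// Delta = <4 * 4, 8 * 4> = <16, 32> elements between a thread's successive accesses.
static_assert(ExampleIterator::Iterations::kRow == 4 && ExampleIterator::Iterations::kColumn == 4,
              "example: per-thread load iterations");
static_assert(ExampleIterator::Delta::kRow == 16 && ExampleIterator::Delta::kColumn == 32,
              "example: element strides between accesses");
} // namespace example_simt_iterator_c_row_major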
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for A operands of column-major-K interleaved layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK,
/// Number of KGroups per kPartition
int PartitionGroupSize
>
class MmaSimtTileIterator<Shape_, Operand::kA, Element_, layout::ColumnMajorInterleaved<4>, Policy_, PartitionsK, PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::ColumnMajorInterleaved<4> ;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
  /// Interleave factor
static const int kInterleave = 4;
/// Number of partitions along K dimension
static const int kPartitionsK = PartitionsK;
/// Number of KGroups per kPartition
static const int kGroupPerTile = PartitionGroupSize / Shape::kColumn;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
/// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
Shape::kRow / Policy::WarpShape::kRow,
Shape::kColumn
>;
static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM) && !(ThreadShape::kColumn % Policy::LaneMmaShape::kK),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow / Policy::LaneMmaShape::kM,
ThreadShape::kColumn / Policy::LaneMmaShape::kK
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
private:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kMK>, layout::ColumnMajorInterleaved<4>> ref_;
/// group index within tile
int k_group_idx_;
public:
CUTLASS_HOST_DEVICE
MmaSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef ref,
int lane_id
) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(Policy::LaneMmaShape::kM, 0);
ref.add_coord_offset(lane_offset);
k_group_idx_ = 0;
ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kMK> *>(ref.data()), ref.stride(0)/Policy::LaneMmaShape::kMK);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
ref_.add_coord_offset({
coord.row() * Shape::kRow / Policy::LaneMmaShape::kMK,
coord.column() * Shape::kColumn});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == kGroupPerTile) {
k_group_idx_ = 0;
add_tile_offset({0, kGroupPerTile * (kPartitionsK-1)});
}
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator--() {
ref_.add_coord_offset({0, -Shape::kColumn});
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kMK > *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kMK> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kColumn; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
dst_ptr[m + k * Iterations::kRow] =
*((ref_.data() + ref_.offset({m * Policy::WarpShape::kRow / kInterleave,
k*Policy::LaneMmaShape::kK}) + pointer_offset / Policy::LaneMmaShape::kM));
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
    Array<Element, Policy::LaneMmaShape::kMK> const *src_ptr =
      reinterpret_cast<Array<Element, Policy::LaneMmaShape::kMK> const *>(&frag);
    CUTLASS_PRAGMA_UNROLL
    for (int k = 0; k < Iterations::kColumn; ++k) {
      CUTLASS_PRAGMA_UNROLL
      for (int m = 0; m < Iterations::kRow; ++m) {
        *(ref_.data() + ref_.offset({m * Policy::WarpShape::kRow / kInterleave,
          k * Policy::LaneMmaShape::kK}) + pointer_offset / Policy::LaneMmaShape::kM) =
          src_ptr[m + k * Iterations::kRow];
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
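// Illustrative usage sketch (added comment, not part of the original header). The tile shape,
// element type, and policy name below are assumptions chosen only to show the typical
// construct / load / advance pattern of this interleaved iterator:
//
//   using IteratorA = MmaSimtTileIterator<
//       MatrixShape<64, 8>, Operand::kA, int8_t,
//       layout::ColumnMajorInterleaved<4>, WarpPolicy, 1, 32>;   // WarpPolicy is hypothetical
//
//   IteratorA iter_A(ref_A, lane_id);          // ref_A references shared memory
//   typename IteratorA::Fragment frag_A;
//   for (int k = 0; k < kKIterations; ++k) {   // kKIterations is hypothetical
//     iter_A.load(frag_A);                     // each lane gathers its interleaved slice
//     ++iter_A;                                // advance one k-group (wraps over K partitions when PartitionsK > 1)
//     // ... feed frag_A to the warp-level SIMT mma ...
//   }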
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for B operands of row-major k-interleaved layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK,
/// Number of KGroups per kPartition
int PartitionGroupSize
>
class MmaSimtTileIterator<Shape_, Operand::kB, Element_, layout::RowMajorInterleaved<4>, Policy_, PartitionsK, PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajorInterleaved<4>;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Interleave factor
static const int kInterleave = 4;
/// Number of partitions along K dimension
static const int kPartitionsK = PartitionsK;
/// Number of KGroups per kPartition
static const int kGroupPerTile = PartitionGroupSize / Shape::kRow;
//
// Derived quantities
//
static_assert(!(Shape::kColumn % Policy::WarpShape::kColumn),
"The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero.");
static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero.");
/// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
Shape::kRow,
Shape::kColumn / Policy::WarpShape::kColumn
>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN) && !(ThreadShape::kRow % Policy::LaneMmaShape::kK),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow / Policy::LaneMmaShape::kK,
ThreadShape::kColumn / Policy::LaneMmaShape::kN
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
private:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kKN>, layout::RowMajorInterleaved<4>> ref_;
/// group index within tile
int k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaSimtTileIterator(
TensorRef ref,
int lane_id
) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(0, Policy::LaneMmaShape::kN);
ref.add_coord_offset(lane_offset);
k_group_idx_ = 0;
ref_.reset(
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kKN> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kKN);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
ref_.add_coord_offset({
coord.row() * Shape::kRow,
coord.column() * Shape::kColumn / Policy::LaneMmaShape::kKN});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator++() {
add_tile_offset({1, 0});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == kGroupPerTile) {
k_group_idx_ = 0;
add_tile_offset({kGroupPerTile * (kPartitionsK-1), 0});
}
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaSimtTileIterator & operator--() {
ref_.add_coord_offset({-Shape::kRow, 0});
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kKN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kKN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
dst_ptr[n + k * Iterations::kColumn] =
*(ref_.data() + ref_.offset({k * Policy::LaneMmaShape::kK,
n * Policy::WarpShape::kColumn / kInterleave}) + pointer_offset / Policy::LaneMmaShape::kN);
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
    Array<Element, Policy::LaneMmaShape::kKN> const *src_ptr =
      reinterpret_cast<Array<Element, Policy::LaneMmaShape::kKN> const *>(&frag);
    CUTLASS_PRAGMA_UNROLL
    for (int k = 0; k < Iterations::kRow; ++k) {
      CUTLASS_PRAGMA_UNROLL
      for (int n = 0; n < Iterations::kColumn; ++n) {
        *(ref_.data() + ref_.offset({k * Policy::LaneMmaShape::kK,
          n * Policy::WarpShape::kColumn / kInterleave}) + pointer_offset / Policy::LaneMmaShape::kN) =
          src_ptr[n + k * Iterations::kColumn];
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
| 59,793 | C | 30.620307 | 139 | 0.659709 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/wmma_array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity (A or B)
Operand Operand,
/// Data type of operand
typename Element_,
/// Layout of operand
typename Layout_,
/// Delta between *MMA operations (in units of *WMMA operations, concept:MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaMultiplicandTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread WMMA operation.
/// It uses nvcuda::wmma::load_matrix_sync to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Interval between adjacent *WMMA instructions (in units of WMMA instructions)
int OpDelta_,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaMultiplicandTileIterator<
Shape_, Operand::kA, Element_, Layout_,
OpDelta_, 32, Policy_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kA;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Delta between *WMMA operations
static int const kOpDelta = OpDelta_;
/// Wmma Operator information and operation delta
using Policy = Policy_;
//
// Derived quantities
//
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Native Wmma shape for operand A (concept MatrixShape)
using WmmaShape = MatrixShape<
Policy::Operator::Shape::kM,
Policy::Operator::Shape::kK
>;
/// Map cutlass datatype to nvcuda::wmma datatype
using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type;
/// Shape of individual WMMA load / stores for operand A
using Iterations = MatrixShape<
Shape::kRow / WmmaShape::kRow,
1
>;
/// Fragment object holding a warps part
using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentA, Iterations::kCount>;
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// Static assertions for this specialization
/////////////////////////////////////////////////////////////////////////////////////////////////////
/// This iterator is specialized for Operand A
static_assert(kOperand == Operand::kA,
"MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for A operands to warp-level Mma.");
/// Supported memory layouts
static_assert(
platform::is_same<cutlass::layout::RowMajor, Layout>::value ||
platform::is_same<cutlass::layout::ColumnMajor, Layout>::value,
"Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor");
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/////////////////////////////////////////////////////////////////////////////////////////////////////
private:
/// Shared memory base pointers - not advanced
char const *pointer_;
/// Byte offset into shared memory - advanced
Index byte_offset_;
/// Stride in units of number of elements
StrideIndex stride_;
/// Layout of shared memory
Layout layout_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): pointer_(reinterpret_cast<char const*>(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += (offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
Index elements_offset = layout_({tile_offset.row() * Shape::kRow, tile_offset.column() * WmmaShape::kColumn});
byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator++() {
Index elements_offset = layout_({0, WmmaShape::kColumn});
byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator--() {
Index elements_offset = layout_({0, WmmaShape::kColumn});
byte_offset_ -= (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load_with_byte_offset(Fragment &frag, Index byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kColumn; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
Index load_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8;
const WmmaDataType *ptr = reinterpret_cast<const WmmaDataType *>(pointer_ + byte_offset_ + load_byte_offset + byte_offset);
nvcuda::wmma::load_matrix_sync(frag[m], ptr, stride_);
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_byte_offset(Fragment const &frag, Index byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kColumn; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
Index store_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8;
WmmaDataType *ptr = reinterpret_cast<WmmaDataType *>(pointer_ + byte_offset_ + store_byte_offset + byte_offset);
nvcuda::wmma::store_matrix_sync(ptr, frag[m], stride_);
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
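// Illustrative usage sketch (added comment, not part of the original header). The shapes and the
// policy name are assumptions; the sketch only shows the construct / load / advance pattern:
//
//   using IteratorA = MmaTensorOpWmmaMultiplicandTileIterator<
//       MatrixShape<64, 16>, Operand::kA, half_t, layout::RowMajor,
//       1, 32, WmmaPolicy>;                    // WmmaPolicy is hypothetical
//
//   IteratorA iter_A(ref_A_smem, lane_id);     // TensorRef must reference shared memory
//   typename IteratorA::Fragment frag_A;
//   iter_A.load(frag_A);                       // one wmma::load_matrix_sync per WMMA tile
//   ++iter_A;                                  // step one WMMA-shaped tile along K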
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread WMMA operation.
/// It uses nvcuda::wmma::load_matrix_sync to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Interval between adjacent *WMMA instructions (in units of WMMA instructions)
int OpDelta_,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaMultiplicandTileIterator<
Shape_, Operand::kB, Element_, Layout_,
OpDelta_, 32, Policy_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kB;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Delta between *WMMA operations
static int const kOpDelta = OpDelta_;
/// Wmma Operator information and operation delta
using Policy = Policy_;
//
// Derived quantities
//
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Native Wmma shape (concept MatrixShape)
using WmmaShape = MatrixShape<
Policy::Operator::Shape::kK,
Policy::Operator::Shape::kN
>;
/// Map cutlass datatype to nvcuda::wmma datatype
using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type;
/// Shape of individual WMMA load / stores for operand B
using Iterations = MatrixShape<
1,
Shape::kColumn / WmmaShape::kColumn
>;
/// Fragment object holding a warp's part of a tile
using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentB, Iterations::kCount>;
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// Static assertions for this specialization
/////////////////////////////////////////////////////////////////////////////////////////////////////
/// This iterator is specialized for Operand B
static_assert(kOperand == Operand::kB,
"MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for B operands to warp-level Mma.");
/// Supported memory layouts
static_assert(
platform::is_same<cutlass::layout::RowMajor, Layout>::value ||
platform::is_same<cutlass::layout::ColumnMajor, Layout>::value,
"Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor");
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/////////////////////////////////////////////////////////////////////////////////////////////////////
private:
/// Shared memory base pointers - not advanced
char const *pointer_;
/// Byte offset into shared memory - advanced
Index byte_offset_;
/// Stride in units of number of elements
StrideIndex stride_;
/// Layout of shared memory
Layout layout_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): pointer_(reinterpret_cast<char const*>(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += (offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
Index elements_offset = layout_({tile_offset.row() * WmmaShape::kRow, tile_offset.column() * Shape::kColumn});
byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator++() {
Index elements_offset = layout_({WmmaShape::kRow, 0});
byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator--() {
Index elements_offset = layout_({WmmaShape::kRow, 0});
byte_offset_ -= (elements_offset * sizeof_bits<Element>::value) / 8;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load_with_byte_offset(Fragment &frag, Index byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
Index load_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8;
const WmmaDataType *ptr = reinterpret_cast<const WmmaDataType *>(pointer_ + byte_offset_ + load_byte_offset + byte_offset);
nvcuda::wmma::load_matrix_sync(frag[n], ptr, stride_);
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_byte_offset(Fragment const &frag, Index byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
Index store_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8;
WmmaDataType *ptr = reinterpret_cast<WmmaDataType *>(pointer_ + byte_offset_ + store_byte_offset + byte_offset);
nvcuda::wmma::store_matrix_sync(ptr, frag[n], stride_);
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Interval between adjacent *WMMA instructions (in units of WMMA instructions, concept: MatrixShape)
typename OpDelta_,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaAccumulatorTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread WMMA operation.
/// It uses nvcuda::wmma::load_matrix_sync and nvcuda::wmma::store_matrix_sync to access
/// shared memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
////////////////////////////////////////////////////////////////////////////////
template <
///< Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Interval between adjacent *WMMA instructions (in units of WMMA instructions)
typename OpDelta_,
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
typename Policy_>
class MmaTensorOpWmmaAccumulatorTileIterator
{
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Wmma Operator information and operation delta
using Policy = Policy_;
//
// Derived quantities
//
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Native Wmma shape (concept MatrixShape)
using WmmaShape = MatrixShape<
Policy::Operator::Shape::kM,
Policy::Operator::Shape::kN
>;
/// Map cutlass datatype to nvcuda::wmma datatype
using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type;
/// Map cutlass::layout to nvcuda::wmma::layout_t enum
static nvcuda::wmma::layout_t const WmmaLayout = cutlass::arch::CutlassToWmmaLayout<Layout>::value;
/// Shape of individual WMMA load / stores for accumulator
using Iterations = MatrixShape<
Shape::kRow / WmmaShape::kRow,
Shape::kColumn / WmmaShape::kColumn
>;
/// Fragment object holding a thread's part of a tile
using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentC, Iterations::kCount>;
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// Static assertions for this specialization
/////////////////////////////////////////////////////////////////////////////////////////////////////
/// Supported layouts
static_assert(
platform::is_same<cutlass::layout::RowMajor, Layout>::value ||
platform::is_same<cutlass::layout::ColumnMajor, Layout>::value,
"Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor");
private:
/// Internal reference
cutlass::TensorRef<Element, Layout> ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
): ref_(ref) { }
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset({tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator & operator++() {
ref_.add_coord_offset({Shape::kRow, 0});
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator & operator--() {
ref_.add_coord_offset({-Shape::kRow, 0});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpWmmaAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
const WmmaDataType * ptr = reinterpret_cast<const WmmaDataType*> (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset);
nvcuda::wmma::load_matrix_sync(frag[m * Iterations::kColumn + n], ptr, ref_.stride()[0], WmmaLayout);
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Iterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
WmmaDataType * ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset);
nvcuda::wmma::store_matrix_sync(ptr, frag[m * Iterations::kColumn + n], ref_.stride()[0], WmmaLayout);
}
}
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
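// Illustrative usage sketch (added comment, not part of the original header), showing how an
// epilogue-like component might round-trip accumulators through this iterator. All names and
// shapes below are assumptions:
//
//   using IteratorC = MmaTensorOpWmmaAccumulatorTileIterator<
//       MatrixShape<64, 64>, float, layout::RowMajor,
//       MatrixShape<1, 1>, WmmaPolicy>;        // WmmaPolicy is hypothetical
//
//   IteratorC iter_C(ref_C_smem, lane_id);
//   typename IteratorC::Fragment accum;
//   iter_C.load(accum);     // wmma::load_matrix_sync using the mapped nvcuda::wmma layout
//   // ... nvcuda::wmma::mma_sync accumulates into accum ...
//   iter_C.store(accum);    // wmma::store_matrix_sync back to the same tile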
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
| 27,101 | C | 32.62531 | 165 | 0.653297 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/default_mma_complex_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default warp-level GEMM operators selected by data type, size, and layouts of operands.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h"
#include "cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_ = arch::OpMultiplyAddComplex>
struct DefaultMmaComplexTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex<T>*complex<T> case
// 4 real-valued mma operations
// A = (ar + j ai), B (br +j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Real-valued underlying type of complex-valued A operand
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Real-valued underlying type of complex-valued B operand
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Real-valued underlying type of complex-valued C operand
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB>
struct DefaultMmaComplexTensorOp<
WarpShape_,
InstructionShape_,
complex<RealElementA>,
LayoutA,
complex<RealElementB>,
LayoutB,
complex<RealElementC>,
LayoutC,
TransformA,
TransformB,
arch::OpMultiplyAddComplex> {
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
RealElementA,
cutlass::layout::RowMajor,
RealElementB,
cutlass::layout::ColumnMajor,
RealElementC,
cutlass::layout::RowMajor,
arch::OpMultiplyAdd>,
cutlass::MatrixShape<1, 1>
>;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaComplexTensorOp<
WarpShape_,
complex<RealElementA>,
LayoutA,
complex<RealElementB>,
LayoutB,
complex<RealElementC>,
LayoutC,
Policy,
TransformA,
TransformB>;
};
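// Illustrative usage sketch (added comment, not part of the original header): how a higher-level
// default would typically extract the warp-level operator from this trait. The warp and
// instruction shapes below are assumptions chosen only for illustration:
//
//   using WarpMma = typename DefaultMmaComplexTensorOp<
//       GemmShape<32, 32, 16>,                 // warp-level tile
//       GemmShape<16, 8, 8>,                   // one tensor op instruction
//       complex<float>, layout::ColumnMajor,
//       complex<float>, layout::RowMajor,
//       complex<float>, layout::RowMajor>::Type;
//
// Each complex multiply-accumulate of the resulting operator issues the four real-valued MMAs
// listed in the identity above.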
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex<T>*complex<T> case using GaussianComplex operation
// 3 real-valued mma operations
// A = (ar + j ai), B = (br +j bi), D = AB
// P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi)
// D = dr + j di = (P1 - P3) + j (P1 + P2)
/////////////////////////////////////////////////////////////////////////////////////////////////
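// A quick algebraic check of the identity (added comment, not in the original header):
//   P1 - P3 = (ar + ai)*br - ai*(br + bi) = ar*br - ai*bi = dr
//   P1 + P2 = (ar + ai)*br - ar*(br - bi) = ai*br + ar*bi = di
// i.e. the Gaussian (Karatsuba-style) form trades one multiplication for extra additions,
// so each complex multiply-accumulate needs 3 real-valued MMAs instead of 4.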
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Real-valued underlying type of complex-valued A operand
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Real-valued underlying type of complex-valued B operand
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Real-valued underlying type of complex-valued C operand
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB>
struct DefaultMmaComplexTensorOp<
WarpShape_,
InstructionShape_,
complex<RealElementA>,
LayoutA,
complex<RealElementB>,
LayoutB,
complex<RealElementC>,
LayoutC,
TransformA,
TransformB,
arch::OpMultiplyAddGaussianComplex> {
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
RealElementA,
cutlass::layout::RowMajor,
RealElementB,
cutlass::layout::ColumnMajor,
RealElementC,
cutlass::layout::RowMajor,
arch::OpMultiplyAdd>,
cutlass::MatrixShape<1, 1>
>;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaGaussianComplexTensorOp<
WarpShape_,
complex<RealElementA>,
LayoutA,
complex<RealElementB>,
LayoutB,
complex<RealElementC>,
LayoutC,
Policy,
TransformA,
TransformB>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization - input and output types are complex<float>*complex<float>
// Use TF32 tensor operation internally
// 4 real-valued mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 operations on TF32
// A = (ar + j ai), B (br +j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB>
struct DefaultMmaComplexTensorOp<
WarpShape_,
InstructionShape_,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
TransformA,
TransformB,
arch::OpMultiplyAddComplex> {
// Complex floating point tensor operations use the mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 instruction
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
tfloat32_t,
cutlass::layout::RowMajor,
tfloat32_t,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::RowMajor,
arch::OpMultiplyAdd>,
cutlass::MatrixShape<1, 1>
>;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaComplexTensorOp<
WarpShape_,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
Policy,
TransformA,
TransformB>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization - input and output types are complex<float>*complex<float>
// Use BF16 tensor operation internally
// 4 real-valued mma.sync.aligned.m16n8k8.f32.bf16.bf16.f32 operations on BF16
// A = (ar + j ai), B (br +j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB>
struct DefaultMmaComplexTensorOp<
WarpShape_,
InstructionShape_,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
TransformA,
TransformB,
arch::OpMultiplyAddFastBF16> {
// Complex floating point tensor operations use the mma.sync.aligned.m16n8k8.f32.bf16.bf16.f32 instruction
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
bfloat16_t,
cutlass::layout::RowMajor,
bfloat16_t,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::RowMajor,
arch::OpMultiplyAdd>,
cutlass::MatrixShape<1, 1>
>;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaComplexTensorOp<
WarpShape_,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
Policy,
TransformA,
TransformB>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization - input and output types are complex<float>*complex<float>
// Use F16 tensor operation internally
// 4 real-valued mma.sync.aligned.m16n8k8.f32.f16.f16.f32 operations on F16
// A = (ar + j ai), B (br +j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB>
struct DefaultMmaComplexTensorOp<
WarpShape_,
InstructionShape_,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
TransformA,
TransformB,
arch::OpMultiplyAddFastF16> {
// Complex floating point tensor operations use the mma.sync.aligned.m16n8k8.f32.f16.f16.f32 instruction
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
half_t,
cutlass::layout::RowMajor,
half_t,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::RowMajor,
arch::OpMultiplyAdd>,
cutlass::MatrixShape<1, 1>
>;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaComplexTensorOp<
WarpShape_,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
Policy,
TransformA,
TransformB>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3xTF32 or 4xTF32 (fast and accurate complex<float> operation)
/// Partial specialization - input and output types are complex<float> * complex<float>
// Use 3xTF32 or 4xTF32 tensor operation internally
// 4 real-valued mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 operations on TF32
// A = (ar + j ai), B (br +j bi), D = AB
// D = dr + j di = 3x[(ar*br - ai*bi) + j (ar*bi + ai*br)]
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
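// Note on the "3x/4x" naming (added comment, not in the original header): each float operand is
// split into a large and a small tfloat32_t part, x ~= big(x) + small(x), and every real-valued
// product is reassembled from the partial products big*big + big*small + small*big (the 3x
// variant drops the small*small term). MmaComplexTensorOpFastF32 applies this split to each of
// the four real-valued products of the complex multiply above.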
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB>
struct DefaultMmaComplexTensorOp<
WarpShape_,
InstructionShape_,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
TransformA,
TransformB,
arch::OpMultiplyAddComplexFastF32> {
// Complex floating point tensor operations use the mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 instruction
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
InstructionShape_,
32,
tfloat32_t,
cutlass::layout::RowMajor,
tfloat32_t,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::RowMajor,
arch::OpMultiplyAdd>,
cutlass::MatrixShape<1, 1>
>;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaComplexTensorOpFastF32<
WarpShape_,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
Policy,
TransformA,
TransformB>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex<double>*complex<double> case
// 4 real-valued mma.sync.aligned.m16n8k4.f64.f64.f64.f64 operations
// A = (ar + j ai), B (br +j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Real-valued underlying type of complex-valued A operand
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Real-valued underlying type of complex-valued B operand
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Real-valued underlying type of complex-valued C operand
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB>
struct DefaultMmaComplexTensorOp<
WarpShape_,
GemmShape<16, 8, 4>,
complex<RealElementA>,
LayoutA,
complex<RealElementB>,
LayoutB,
complex<RealElementC>,
LayoutC,
TransformA,
TransformB,
arch::OpMultiplyAddComplex> {
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
GemmShape<16, 8, 4>,
32,
RealElementA,
cutlass::layout::RowMajor,
RealElementB,
cutlass::layout::ColumnMajor,
RealElementC,
cutlass::layout::RowMajor,
arch::OpMultiplyAdd>,
cutlass::MatrixShape<1, 1>
>;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaComplexTensorOp<
WarpShape_,
complex<RealElementA>,
LayoutA,
complex<RealElementB>,
LayoutB,
complex<RealElementC>,
LayoutC,
Policy,
TransformA,
TransformB,
true>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex<T>*complex<T> case using GaussianComplex operation
// 3 real-valued mma.sync.aligned.m16n8k4.f64.f64.f64.f64 operations
// A = (ar + j ai), B = (br +j bi), D = AB
// P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi)
// D = dr + j di = (P1 - P3) + j (P1 + P2)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Real-valued underlying type of complex-valued A operand
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Real-valued underlying type of complex-valued B operand
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Real-valued underlying type of complex-valued C operand
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB>
struct DefaultMmaComplexTensorOp<
WarpShape_,
GemmShape<16, 8, 4>,
complex<RealElementA>,
LayoutA,
complex<RealElementB>,
LayoutB,
complex<RealElementC>,
LayoutC,
TransformA,
TransformB,
arch::OpMultiplyAddGaussianComplex> {
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
GemmShape<16, 8, 4>,
32,
RealElementA,
cutlass::layout::RowMajor,
RealElementB,
cutlass::layout::ColumnMajor,
RealElementC,
cutlass::layout::RowMajor,
arch::OpMultiplyAdd>,
cutlass::MatrixShape<1, 1>
>;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::MmaGaussianComplexTensorOp<
WarpShape_,
complex<RealElementA>,
LayoutA,
complex<RealElementB>,
LayoutB,
complex<RealElementC>,
LayoutC,
Policy,
TransformA,
TransformB,
true>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
| 20,553 | C | 32.530179 | 107 | 0.597918 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
Operand Operand,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class MmaTensorOpMultiplicandTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
64>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, 64>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Determine number of elements along outer dimension per individual LDSM op
static int const kLdsmOpOuter = Layout::kElementsPerAccess;
static int const kLdsmOpInner = 8;
static_assert(!(Shape::kContiguous % kLdsmOpOuter),
"Shape of warp-level mma must be divisible by LDSM's fundamental tile size.");
static_assert(!(Shape::kStrided % kLdsmOpInner),
"Shape of warp-level mma must be divisible by LDSM's fundamental tile size.");
/// Shape of one individual LDSM instruction
static int const LdsmShapeStrided =
InstructionShape::kStrided / kLdsmOpInner;
static int const LdsmShapeContiguous = 4 / LdsmShapeStrided;
using LdsmShape =
layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>;
/// Number and arrangement of LDSM instructions
using LdsmIterations = layout::PitchLinearShape<
Shape::kContiguous / Layout::kElementsPerAccess / LdsmShapeContiguous,
1>;
/// Number of groups for each tile
static int const kGroupsPerTile =
Shape::kStrided / InstructionShape::kStrided;
};
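// Worked example (illustrative only; the concrete element width is an
// assumption for this sketch, not a requirement of the header). For a 16-bit
// element type, Layout::kElementsPerAccess == 8, so kLdsmOpOuter == 8 and
// kLdsmOpInner == 8:
//   - 1688-style instruction (InstructionShape::kStrided == 8):
//       LdsmShapeStrided = 8 / 8 = 1, LdsmShapeContiguous = 4 / 1 = 4,
//       i.e. one LDSM.4 spanning four 8x128-bit blocks in the contiguous dimension.
//   - 16816-style instruction (InstructionShape::kStrided == 16):
//       LdsmShapeStrided = 16 / 8 = 2, LdsmShapeContiguous = 4 / 2 = 2,
//       matching the "16816" branches selected in the constructor below.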
private:
/// Only the default OpDelta arrangement is currently supported.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Number of internal pointers needed to reference shared memory
static int const kPointerCount =
Layout::TileShape::kContiguous / Policy::LdsmShape::kContiguous;
/// Pointer type used for accesses
using AccessType = Array<Element, Layout::kElementsPerAccess>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_[kPointerCount];
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int quad_pair = (lane_id >> 3);
int quad_quad = (lane_id >> 4);
int lane_in_quad = (lane_id & 3);
int lane_in_quad_pair = (lane_id & 7);
int lane_in_quad_quad = (lane_id & 15);
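// Worked example (illustrative): for lane_id == 13,
//   quad_pair = 13 >> 3 = 1, quad_quad = 13 >> 4 = 0, lane_in_quad = 13 & 3 = 1,
//   lane_in_quad_pair = 13 & 7 = 5, lane_in_quad_quad = 13 & 15 = 13.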
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount; ++i) {
int partition_contiguous_idx = -1;
int access_contiguous_idx = -1;
int access_strided_idx = -1;
if (Policy::LdsmShape::kContiguous == 4) {
// Matrix multiply 1688 A/B
// Q0 Q1 Q2 Q3 (Q stands for one 8x128-bit block).
// Four blocks are adjacent in the contiguous dimension.
partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ i);
access_contiguous_idx = (quad_pair ^ lane_in_quad);
access_strided_idx = lane_in_quad_pair;
}
else if (Policy::LdsmShape::kContiguous == 2 &&
kOperand == Operand::kA) {
// Matrix multiply 16816 A
// Q0 Q1
// Q2 Q3
partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 1));
access_contiguous_idx =
(((quad_pair & 1) + ((i & 1) << 1)) ^ lane_in_quad);
access_strided_idx = lane_in_quad_pair + (lane_id >> 4 << 3);
} else if (Policy::LdsmShape::kContiguous == 2 &&
kOperand == Operand::kB) {
// Matrix multiply 16816 B
// Q0 Q2
// Q1 Q3
partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 1));
access_contiguous_idx = ((quad_quad + ((i & 1) << 1)) ^ lane_in_quad);
access_strided_idx = lane_in_quad_quad;
} else if (Policy::LdsmShape::kContiguous == 1) {
// Matrix multiply 16832.SP B
// Q0
// Q1
// Q2
// Q3
partition_contiguous_idx = ((lane_in_quad_pair >> 2) ^ (i >> 2));
access_contiguous_idx = ((i & 3) ^ lane_in_quad);
access_strided_idx = lane_id;
}
int access_contiguous =
partition_contiguous_idx * Layout::PartitionShape::kContiguous +
access_contiguous_idx;
int access_strided = access_strided_idx;
pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
if (Shape::kContiguous ==
Layout::PartitionShape::kContiguous * Layout::kElementsPerAccess) {
if (tile_offset.contiguous() % 2) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount / 2; ++i) {
AccessType const *tmp_pointer = pointer_[i];
pointer_[i] = pointer_[i + kPointerCount / 2];
pointer_[i + kPointerCount / 2] = tmp_pointer;
}
}
contiguous_offset = (tile_offset.contiguous() >> 1) << 1;
}
int offset = (tile_offset.strided() * InstructionShape::kStrided) *
stride_ * Layout::kElementsPerAccess +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == Policy::kGroupsPerTile) {
k_group_idx_ = 0;
add_tile_offset(
{0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)});
}
}
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
Layout::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr =
reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
AccessType const *source_ptr =
pointer_[c % kPointerCount] +
Layout::TileShape::kContiguous * (c / kPointerCount) +
Policy::kLdsmOpInner * Policy::LdsmShape::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
cutlass::arch::ldsm<layout::ColumnMajor, Policy::LdsmShape::kCount>(
fetch_ptr[access_idx],
source_byte_ptr
);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no op
}
};
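// Illustrative usage sketch (comments only). The tile/instruction shapes and
// element type below are assumptions chosen for the example; warp-level MMA
// classes normally instantiate this iterator internally rather than user code:
//
//   using Element    = cutlass::half_t;
//   using SmemLayout = layout::TensorOpMultiplicandCongruous<
//       sizeof_bits<Element>::value, 64>;
//   using Iterator = MmaTensorOpMultiplicandTileIterator<
//       layout::PitchLinearShape<64, 32>,   // warp tile (contiguous x strided)
//       Operand::kA, Element, SmemLayout,
//       layout::PitchLinearShape<16, 16>,   // 16816 instruction footprint
//       1, 32, 1>;                          // OpDelta, threads, PartitionsK
//
//   // Inside a device function, given a TensorRef to shared memory:
//   //   Iterator iter(smem_ref, lane_id);
//   //   typename Iterator::Fragment frag;
//   //   iter.load(frag);   // issues LDSM instructions
//   //   ++iter;            // advance along the strided (K) dimension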
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread MMA.TF32 NT TensorOps. It
/// uses LDS.32 to load from shared memory and therefore must be initialized
/// with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous<32, 32>, InstructionShape_,
OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous<32, 32>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Determine the number of elements along the outer dimension per individual
// 32-bit shared memory load. One warp-wide set of 32-bit loads covers an
// 8x4 element footprint.
static int const kLdsOpInner = Layout::TileShape::kStrided;
static int const kLdsOpOuter = kThreads / kLdsOpInner;
static_assert(!(Shape::kContiguous % kLdsOpOuter),
"Shape of warp-level mma must be divisible by 32bit "
"fundamental tile size.");
static_assert(!(Shape::kStrided % kLdsOpInner),
"Shape of warp-level mma must be divisible by 32bit "
"fundamental tile size.");
/// Number of 32-bit shared memory load instructions needed by one MMA instruction:
/// 1688 A 2x2
/// 1688 B 1x2
/// 16816 B 1x4
static int const LdsShapeContiguous =
InstructionShape::kContiguous / kLdsOpOuter;
static int const LdsShapeStrided = InstructionShape::kStrided / kLdsOpInner;
using LdsShape =
layout::PitchLinearShape<LdsShapeContiguous, LdsShapeStrided>;
/// Number and arrangement of LDS instructions
using LdsIterations = layout::PitchLinearShape<
Shape::kContiguous / LdsShapeContiguous / kLdsOpOuter, 1>;
/// Number of groups for each tile
static int const kGroupsPerTile =
Shape::kStrided / InstructionShape::kStrided;
};
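// Worked example (illustrative; assumes the 8x4 warp footprint noted above,
// i.e. kLdsOpInner == 4 and kLdsOpOuter == 8). For a 1688.TF32 A operand with
// InstructionShape == <16, 8> (contiguous x strided):
//   LdsShapeContiguous = 16 / 8 = 2, LdsShapeStrided = 8 / 4 = 2,
// which is the 2x2 arrangement listed in the comment above.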
private:
/// Only the default OpDelta arrangement is currently supported.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Number of internal pointers needed to reference shared memory
static int const kPointerCount = Layout::TileShape::kContiguous *
Layout::kElementsPerAccess /
Policy::kLdsOpOuter;
/// Vectorized access is not used
static int const kElementsPerAccess = 1;
/// Pointer type used for accesses
using AccessType = Element;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_[kPointerCount];
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() : stride_(0), byte_offset_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: stride_(ref.stride(0)), byte_offset_(0), k_group_idx_(0) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount; ++i) {
int access_strided = lane_id % Policy::kLdsOpInner;
int access_contiguous = (lane_id / Policy::kLdsOpInner) +
(access_strided ^ i) * Policy::kLdsOpOuter;
pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
}
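// Worked example for the pointer setup above (illustrative; assumes
// kLdsOpInner == 4 and kLdsOpOuter == 8 as in the Policy sketch). For
// lane_id == 13 and i == 0:
//   access_strided    = 13 % 4 = 1
//   access_contiguous = (13 / 4) + (1 ^ 0) * 8 = 11
// so pointer_[0] addresses the element at (contiguous = 11, strided = 1).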
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int contiguous_offset = tile_offset.contiguous();
if (Shape::kContiguous ==
Layout::TileShape::kContiguous * Layout::kElementsPerAccess / 2) {
if (tile_offset.contiguous() % 2) {
// Matrix multiply 1688: swap pointer pairs
//   pointer_[0] <=> pointer_[4], pointer_[1] <=> pointer_[5],
//   pointer_[2] <=> pointer_[6], pointer_[3] <=> pointer_[7]
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPointerCount / 2; ++i) {
AccessType const *tmp_pointer = pointer_[i];
pointer_[i] = pointer_[i + kPointerCount / 2];
pointer_[i + kPointerCount / 2] = tmp_pointer;
}
}
contiguous_offset = (tile_offset.contiguous() >> 1) << 1;
}
int offset = (tile_offset.strided() * InstructionShape::kStrided) * stride_ +
contiguous_offset * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == Policy::kGroupsPerTile) {
k_group_idx_ = 0;
add_tile_offset(
{0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)});
}
}
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator--() {
byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) *
kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Element *fetch_ptr = reinterpret_cast<Element *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ss = 0; ss < Policy::LdsShape::kStrided; ++ss) {
CUTLASS_PRAGMA_UNROLL
for (int cc = 0; cc < Policy::LdsShape::kContiguous; ++cc) {
int access_idx =
cc + (ss + (c + s * Policy::LdsIterations::kContiguous) *
Policy::LdsShape::kStrided) *
Policy::LdsShape::kContiguous;
int access_idx_contiguous = cc + c * Policy::LdsShape::kContiguous;
int access_idx_strided =
(ss + s * Policy::LdsShape::kStrided) * Policy::kLdsOpInner;
AccessType const *source_ptr =
pointer_[access_idx_contiguous % kPointerCount] +
Layout::TileShape::kContiguous * Layout::kElementsPerAccess *
(access_idx_contiguous / kPointerCount) +
access_idx_strided * stride_;
char const *source_byte_ptr =
reinterpret_cast<char const *>(source_ptr) + byte_offset +
byte_offset_;
fetch_ptr[access_idx] =
*reinterpret_cast<Element const *>(source_byte_ptr);
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no op
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA,
"MmaTensorOpMultiplicandIterator for ColumnMajor Congruous may "
"only be instantiated for A operand to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared
/// memory and therefore must be initialized with a TensorRef to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator for RowMajor Congruous may "
"only be instantiated for B operand to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Extent of the crosswise dimension (in units of elements)
int Crosswise,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for "
"A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Extent of the crosswise dimension (in units of elements)
static int const kCrosswise = Crosswise;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kContiguous % InstructionShape::kContiguous),
"Shape of warp-level Mma must be divisible by operator shape.");
// Determine number of elements along outer dimension per individual LDSM op
static int const kLdsmOpOuter = Layout::kElementsPerAccess;
static int const kLdsmOpInner = 8;
static_assert(!(Shape::kContiguous % kLdsmOpOuter),
"Shape of warp-level mma must be divisible by LDSM's "
"fundamental tile size.");
static_assert(!(Shape::kStrided % kLdsmOpInner),
"Shape of warp-level mma must be divisible by LDSM's "
"fundamental tile size.");
/// Shape of one individual LDSM instruction
static int const LdsmShapeContiguous =
InstructionShape::kContiguous / kLdsmOpOuter;
static int const LdsmShapeStrided =
((4 / LdsmShapeContiguous * kLdsmOpInner) > Shape::kStrided)
? (Shape::kStrided / kLdsmOpInner)
: (4 / LdsmShapeContiguous);
using LdsmShape =
layout::PitchLinearShape<LdsmShapeContiguous, LdsmShapeStrided>;
/// Number and arrangement of LDSM instructions
using LdsmIterations =
layout::PitchLinearShape<1, Shape::kStrided / kLdsmOpInner /
LdsmShape::kStrided>;
/// Number of groups for each tile
static int const kGroupsPerTile = Layout::TileShape::kContiguous /
Layout::kFactor / LdsmShape::kContiguous;
};
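// Worked example (illustrative; the element width and instruction shape are
// assumptions for this sketch). For a 16-bit element type,
// Layout::kElementsPerAccess == 8, so kLdsmOpOuter == 8. With
// InstructionShape::kContiguous == 16 (the K extent of a 16816 operand) and
// Shape::kStrided >= 16:
//   LdsmShapeContiguous = 16 / 8 = 2
//   LdsmShapeStrided    = 4 / 2  = 2   (the capping branch is not taken)
// giving a 2x2 (LDSM.4) arrangement per instruction.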
private:
/// Only the default OpDelta arrangement is currently supported.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, Shape::kStrided *
InstructionShape::kContiguous / kThreads>;
private:
/// Total number of sections. The memory is divided into stages; one stage
/// can store one tile. A stage is divided into sections; interleaved layouts
/// may have multiple sections per stage, while the other layouts have exactly
/// one section per stage.
int sections_;
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter used to determine when to increment byte offset and when
/// to XOR it
int k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator()
: pointer_(nullptr),
sections_(0),
stride_(0),
byte_offset_(0),
k_group_idx_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: pointer_(reinterpret_cast<AccessType const *>(ref.data())),
sections_(ref.stride(0) / kCrosswise),
// stride_ = kCrosswise * sections_ * kFactor / kElementsPerAccess (in units of AccessType)
stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess),
byte_offset_(0),
k_group_idx_(0) {
// The warp-level iterator uses at most double buffering to hide latency. If
// there are more than 2 sections, every stage should have more than one section.
// Turing silicon requires that all 32 threads in a warp provide valid addresses
// even for LDSM.1 and LDSM.2.
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 750))
lane_id = lane_id % (Policy::LdsmShape::kCount * Policy::kLdsmOpInner);
#endif
int quad_quad = (lane_id >> 4);
int quad_pair = (lane_id >> 3);
int lane_in_pair = (lane_id & 1);
int lane_in_quad = (lane_id & 3);
int lane_in_quad_pair = (lane_id & 7);
int lane_in_quad_quad = (lane_id & 15);
int partition_contiguous_idx = -1;
int access_contiguous_idx = -1;
int access_strided_idx = -1;
if (Layout::kFactor == 4) {
// Super Integer matrix multiply Interleaved-32
int factor_in_partition =
(Layout::PartitionShape::kContiguous * Layout::kFactor /
Layout::TileShape::kContiguous);
if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) {
// Integer matrix multiply 8816 A/B
partition_contiguous_idx = lane_in_quad / factor_in_partition;
access_contiguous_idx = ((lane_in_pair * factor_in_partition) ^
(lane_in_quad_quad / Layout::kFactor));
access_strided_idx = lane_id / Layout::kFactor;
}
else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kA) {
// Integer matrix multiply 16832 A
partition_contiguous_idx = lane_in_quad / factor_in_partition;
access_strided_idx = lane_in_quad_quad / Layout::kFactor;
access_contiguous_idx =
((lane_in_pair * factor_in_partition + quad_quad) ^
access_strided_idx);
}
else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kB) {
// Integer matrix multiply 16832 B
partition_contiguous_idx = lane_in_quad / factor_in_partition;
access_strided_idx = lane_in_quad_pair / Layout::kFactor + quad_quad * 2;
access_contiguous_idx =
((lane_in_pair * factor_in_partition + ((lane_id & 8) >> 3)) ^
access_strided_idx);
}
} else if (Layout::kFactor == 2) {
// Super Matrix multiply kBlock = 32
if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) {
// Matrix multiply 1688 A/B
// (Q stands for one 8x128-bit block).
// Q0
// Q1
// Q2
// Q3
// Four blocks are next to each other in the strided dimension.
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx = (lane_in_quad_pair / Layout::kFactor);
access_strided_idx = lane_id / Layout::kFactor;
}
else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kA) {
// Matrix multiply 16816|1688.TF32 A
// Q0 Q2
// Q1 Q3
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx =
(quad_quad ^ (lane_in_quad_pair / Layout::kFactor));
access_strided_idx = (lane_in_quad_quad / Layout::kFactor);
} else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kB) {
// Matrix multiply 16816|1688.TF32 B
// Q0 Q1
// Q2 Q3
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx =
((quad_pair & 1) ^ (lane_in_quad_pair / Layout::kFactor));
access_strided_idx =
(lane_in_quad_pair + (lane_id >> 4 << 3)) / Layout::kFactor;
}
else if (Policy::LdsmShape::kContiguous == Policy::LdsmShape::kCount) {
// Matrix multiply 16832.SP B
// Q0 Q1 Q2 Q3
partition_contiguous_idx = (lane_id % Layout::kFactor);
access_contiguous_idx =
(quad_pair ^ (lane_in_quad_pair / Layout::kFactor));
access_strided_idx = lane_in_quad_pair / Layout::kFactor;
}
} else if (Layout::kFactor == 1) {
// Super Matrix multiply kBlock = 64
if (Policy::LdsmShape::kStrided == Policy::LdsmShape::kCount) {
// Q0
// Q1
// Q2
// Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2);
access_contiguous_idx = lane_in_quad;
access_strided_idx = lane_id;
}
else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kA) {
// Matrix multiply 16816|1688.TF32 A
// Q0 Q2
// Q1 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2);
access_contiguous_idx = (quad_quad ^ lane_in_quad);
access_strided_idx = lane_in_quad_quad;
} else if (Policy::LdsmShape::kStrided ==
(Policy::LdsmShape::kCount / 2) &&
kOperand == Operand::kB) {
// Matrix multiply 16816|1688.TF32 B
// Q0 Q1
// Q2 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2);
access_contiguous_idx = ((quad_pair & 1) ^ lane_in_quad);
access_strided_idx = lane_in_quad_pair + (lane_id >> 4 << 3);
}
else if (Policy::LdsmShape::kContiguous == Policy::LdsmShape::kCount) {
// Matrix multiply 16832.SP B
// Q0 Q1 Q2 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2);
access_contiguous_idx = (quad_pair ^ lane_in_quad);
access_strided_idx = lane_in_quad_pair;
}
}
int access_contiguous =
partition_contiguous_idx * Layout::PartitionShape::kContiguous +
access_contiguous_idx;
int access_strided = access_strided_idx;
byte_offset_ = (access_contiguous + access_strided * stride_) *
sizeof_bits<Element>::value * Layout::kElementsPerAccess / 8;
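// Worked example (illustrative): for a 16-bit element type one access covers
// 8 elements == 16 bytes, so the expression above reduces to
//   byte_offset_ = 16 * (access_contiguous + access_strided * stride_).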
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof_bits<Element>::value / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile;
int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile;
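// Worked example (illustrative): with Policy::kGroupsPerTile == 4 and
// tile_offset.contiguous() == 5, whole_tiles == 1 and k_groups_delta == 1,
// so the XOR below swizzles by one k-group and the pointer advances by one
// whole tile along the contiguous dimension.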
byte_offset_ ^= k_groups_delta * sizeof_bits<Element>::value *
Layout::kElementsPerAccess *
Policy::LdsmShape::kContiguous / 8;
pointer_ +=
tile_offset.strided() * stride_ * Shape::kStrided / Layout::kFactor +
whole_tiles * stride_ / sections_;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(
TensorCoord const &tile_offset) {
int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile;
int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile;
if (k_groups_delta < 0) {
whole_tiles -= 1;
k_groups_delta += Policy::kGroupsPerTile;
}
if ((Policy::kGroupsPerTile / kPartitionsK) >= 2) {
byte_offset_ ^= (k_groups_delta & 1) * Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
}
if ((Policy::kGroupsPerTile / kPartitionsK) >= 4) {
byte_offset_ ^= ((k_groups_delta + (k_group_idx_ & 1)) & 2) *
Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
}
if ((Policy::kGroupsPerTile / kPartitionsK) == 8) {
byte_offset_ ^= ((k_groups_delta + (k_group_idx_ & 3)) & 4) *
Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
}
k_group_idx_ += k_groups_delta;
whole_tiles += k_group_idx_ / (Policy::kGroupsPerTile / kPartitionsK);
k_group_idx_ = k_group_idx_ % (Policy::kGroupsPerTile / kPartitionsK);
pointer_ +=
tile_offset.strided() * stride_ * Shape::kStrided / Layout::kFactor +
whole_tiles * stride_ / sections_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator++() {
// Integer matrix multiply 16832 Interleaved-32
// NONE
// Integer matrix multiply 16816 Interleaved-32 || Integer matrix multiply 16816 kblock=32
// Integer matrix multiply 8816 Interleaved-32
// ^1 ^1
// Matrix multiply 1684.TF32 kblock=16 || Integer matrix multiply 16816 kblock=64
// Matrix multiply 1688 kblock=32 || Integer matrix multiply 8816 kblock=64
// ^1 ^3 ^1 ^3
// Matrix multiply 1688 kblock=64
// ^1 ^3 ^1 ^7 ^1 ^3 ^1 ^7
// Matrix multiply 16816 kblock=32 | 1688.TF32 kblock=16 || Integer matrix multiply 16832 kblock=64
// ^2 ^2
// Matrix multiply 16816 kblock=64 | 1688.TF32 kblock=32 || Integer matrix multiply 16832 kblock=128
// ^2 ^6 ^2 ^6
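// Worked example (illustrative; assumes 16-bit elements so that
// Layout::kElementsPerAccess == 8, and Policy::LdsmShape::kContiguous == 2):
// a ^1 step below XORs byte_offset_ with 2 * 16 * 8 / 8 = 32, a ^3 step with
// 96, and a ^7 step with 224.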
if ((Policy::kGroupsPerTile / kPartitionsK) > 1) {
int mask = ((Policy::kGroupsPerTile / kPartitionsK) == 8)
? 3
: (((Policy::kGroupsPerTile / kPartitionsK) == 4) ? 1 : 0);
if (((k_group_idx_ & mask) % 2) == 0)
byte_offset_ ^= 1 * Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
else if ((k_group_idx_ & mask) == 1)
byte_offset_ ^= 3 * Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
else if ((k_group_idx_ & mask) == 3)
byte_offset_ ^= 7 * Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value *
Layout::kElementsPerAccess / 8;
}
k_group_idx_++;
if (k_group_idx_ == (Policy::kGroupsPerTile / kPartitionsK)) {
k_group_idx_ = 0;
add_tile_offset({Policy::kGroupsPerTile, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator--() {
// Decrement is not supported for this layout.
assert(0);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr =
reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
AccessType const *source_ptr =
pointer_ + Policy::LdsmShape::kContiguous * c +
Policy::kLdsmOpInner / Layout::kFactor *
Policy::LdsmShape::kStrided * s * stride_;
char const *source_byte_ptr =
reinterpret_cast<char const *>(source_ptr) + byte_offset +
byte_offset_;
cutlass::arch::ldsm<layout::RowMajor, Policy::LdsmShape::kCount>(
fetch_ptr[access_idx], source_byte_ptr);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof_bits<AccessType>::value * pointer_offset / 8;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group % (Policy::kGroupsPerTile / kPartitionsK);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Extent of the crosswise dimension (in units of elements)
int Crosswise,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator for ColumnMajor Crosswise may "
"only be instantiated for B operand to warp-level Mma.");
/// Element type
using Element = Element_;
/// Extent of the crosswise dimension (kBlock size, in units of elements)
static int const kCrosswise = Crosswise;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
kCrosswise>,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: iterator_({ref.data(), ref.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Element number when the layout crosses (in units of elements)
int Crosswise,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA,
"MmaTensorOpMultiplicandIterator for RowMajor Crosswise may "
"only be instantiated for A operand to warp-level Mma.");
/// Element type
using Element = Element_;
/// Element number when the layout crosses
static int const kCrosswise = Crosswise;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, kCrosswise>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
kCrosswise>,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id)
: iterator_({ref.data(), ref.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
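// Illustrative usage sketch (not part of the original header; `IteratorA`,
// `ref_A` and `lane_id` are hypothetical names). Given a shared-memory
// TensorRef in the crosswise layout expected by the A-operand specialization
// above, each thread of a warp constructs and advances the iterator roughly as:
//
//   IteratorA iter_A(ref_A, lane_id);   // lane_id = threadIdx.x % 32
//   typename IteratorA::Fragment frag_A;
//   iter_A.load(frag_A);                // fragment load backed by LDSM
//   ++iter_A;                           // advance along the k dimension
//
// The column-major B-operand specialization earlier in this file is used the
// same way.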
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<
Shape_, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static bool const kDivisible =
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN);
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM,
(Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN
>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
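// Worked example (illustrative assumption, not from the original header):
// for InstructionShape = GemmShape<16, 8, 16>, kElementsPerAccess = 2 and
// kAccumulatorRows = 2, i.e. each thread owns a 2-row x 2-column patch
// (4 accumulators) of every 16x8 MMA tile. With a 64x64 warp tile,
// Policy::MmaIterations = MatrixShape<4, 8> and the Fragment below holds
// 4 * 8 * (16 * 8) / 32 = 128 elements per thread.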
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
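// Descriptive note: each quad of four threads (lane_id / 4) maps to one row
// of the 8x8 accumulator tile, and each thread within the quad (lane_id & 3)
// owns kElementsPerAccess consecutive columns of that row.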
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
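// Descriptive note: traverse the MMA tiles column-by-column; within each
// tile this thread gathers kAccumulatorRows rows spaced kRowsPerTile apart,
// reading kElementsPerAccess consecutive accumulators from each row.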
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
frag[mma_accum_start + row * kElementsPerAccess + col] = offset_ref.at({accum_m, accum_n});
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
offset_ref.at({accum_m, accum_n}) = frag[idx];
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment const &frag, ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout.
///
/// This iterator is not tested.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<
Shape_, Element_, cutlass::layout::AffineRankN<2>, InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static bool const kDivisible =
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN);
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM,
(Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN
>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
frag[mma_accum_start + row * kElementsPerAccess + col] = offset_ref.at({accum_m, accum_n});
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
offset_ref.at({accum_m, accum_n}) = frag[idx];
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment const &frag, ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<Shape_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static bool const kDivisible =
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN);
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kRow + InstructionShape::kM - 1) / InstructionShape::kM,
(Shape::kColumn + InstructionShape::kN - 1) / InstructionShape::kN
>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
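// The thread-to-accumulator mapping matches the row-major specialization
// above; only the TensorRef is column-major here, so offset_ref.at({m, n})
// below resolves the same logical coordinates through a column-major stride.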
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element,
Policy::MmaIterations::kCount * InstructionShape::kMN / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
frag[idx] = offset_ref.at({accum_m, accum_n});
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
offset_ref.at({accum_m, accum_n}) = frag[idx];
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment const &frag, ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_,
/// Interleaved N
int InterleavedN>
class MmaTensorOpAccumulatorTileIterator<
Shape_, Element_, cutlass::layout::ColumnMajorInterleaved<InterleavedN>,
InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorInterleaved<InterleavedN>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
};
private:
static int const kElementsPerAccess = 2;
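// Note (hedged): with kElementsPerAccess = 2, each thread of a quad owns a
// pair of adjacent accumulator columns per 8x8 tile, so the loops below move
// exactly one two-element AccessType per MMA iteration.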
public:
//
// Derived quantities
//
using AccessType = Array<Element, kElementsPerAccess>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, Shape::kCount / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
AccessType* frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * InstructionShape::kM;
int accum_n = mma_n * InstructionShape::kN;
int idx = mma_m + mma_n * Policy::MmaIterations::kRow;
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
offset_ref.offset(TensorCoord(accum_m, accum_n)));
frag_ptr[idx] = access_ptr[0];
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * InstructionShape::kM;
int accum_n = mma_n * InstructionShape::kN;
int idx = mma_m + mma_n * Policy::MmaIterations::kRow;
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
offset_ref.offset(TensorCoord(accum_m, accum_n)));
access_ptr[0] = frag_ptr[idx];
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
Fragment const &frag, ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store
/// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major
/// accumulator layout.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept |
/// WriteableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_,
/// Interleaved N
int InterleavedN>
class MmaTensorOpAccumulatorTileIterator<
Shape_, Element_, cutlass::layout::TensorNCxHWx<InterleavedN>,
InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = int8_t;
/// Layout of source tile
using Layout = cutlass::layout::TensorNCxHWx<InterleavedN>;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of elements in strided dimension that each STG writes
static int const kStridedPerSTG = 8;
/// Factor to calculate reorder index to pack accumulator.
static int const kPackedFactor = Shape::kColumn / 32;
/// Number of mma operations performed
using MmaIterations = MatrixShape<Shape::kRow / kStridedPerSTG,
Shape::kColumn / InterleavedN>;
};
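// Worked example (illustrative assumption): for Shape = MatrixShape<64, 64>
// and InterleavedN = 32, Policy::kPackedFactor = 2 and Policy::MmaIterations
// = MatrixShape<8, 2>; with kElementsPerAccess = InterleavedN / 4 = 8, the
// Fragment below holds 64 * 64 / 32 = 128 int32_t accumulators per thread.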
private:
static int const kElementsPerAccess = InterleavedN / 4;
public:
//
// Derived quantities
//
struct alignas((kElementsPerAccess * sizeof_bits<Element>::value / 8)) AccessType {
Array<Element, kElementsPerAccess> storage;
};
/// Fragment object holding a thread's part of a tile
using Fragment = Array<int32_t, Shape::kCount / kThreads>;
private:
/// Reference to output tensor
TensorRef ref_;
/// Row offset index globally
LongIndex global_offset_row_;
/// Column offset index globally
LongIndex global_offset_col_;
/// Output tensor size
TensorCoord extent_;
/// Alpha
float alpha_;
/// Beta
float beta_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int const lane_id,
TensorCoord extent,
float alpha = 1.0f,
float beta = 0.0f
):
ref_(ref),
extent_(extent),
alpha_(alpha),
beta_(beta) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
global_offset_row_ = quad;
global_offset_col_ = lane_in_quad * kElementsPerAccess;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(MatrixCoord const &tile_offset) {
global_offset_row_ += tile_offset.row() * Shape::kRow;
global_offset_col_ += tile_offset.column() * Shape::kColumn;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
AccessType* frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * InstructionShape::kM;
int accum_n = mma_n * InstructionShape::kN;
int idx = mma_m + mma_n * Policy::MmaIterations::kRow;
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
accum_m * offset_ref.stride(0) + accum_n);
frag_ptr[idx] = access_ptr[0];
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
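// Descriptive note: the accumulator's logical GEMM coordinates map onto the
// interleaved NC/xHWx output as
//   row    -> n * (extent_.h() * extent_.w()) + pq_rem    (batch, spatial)
//   column -> (c / InterleavedN, c % InterleavedN)        (channel block, lane)
// so an element lands at
//   n * stride_n + (c / InterleavedN) * pq * InterleavedN
//     + pq_rem * InterleavedN + (c % InterleavedN).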
Array<float, Shape::kCount / kThreads> output_frag_f;
Array<Element, Shape::kCount / kThreads> output_frag;
LongIndex pq = extent_.h() * extent_.w();
LongIndex extent_row = extent_.n() * pq;
LongIndex extent_col = extent_.c();
LongIndex k_major = (global_offset_col_ / InterleavedN) * pq;
Index k_minor = global_offset_col_ % InterleavedN;
LongIndex k_offset = k_major * InterleavedN + k_minor;
LongIndex k_offset_delta = pq * InterleavedN;
LongIndex stride_n = pq * extent_.c();
Index n;
LongIndex pq_rem;
unsigned int pq_mul, pq_shr;
find_divisor(pq_mul, pq_shr, pq);
if(beta_ == 0.0f) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < frag.size(); ++i) {
output_frag_f[i] = frag[i];
}
if(InstructionShape::kM == Policy::kStridedPerSTG) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < frag.size(); ++i) {
output_frag[i] = (Element)(output_frag_f[i] * alpha_);
}
} else {
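// Hedged interpretation: when InstructionShape::kM differs from
// kStridedPerSTG, map_i permutes this thread's accumulators so that each
// wide store below writes kElementsPerAccess channels that are contiguous
// in the interleaved output.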
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < frag.size(); ++i) {
int map_i = (i / (16 * Policy::kPackedFactor)) * (16 * Policy::kPackedFactor)
+ (i % (8 * Policy::kPackedFactor)) / 2 * 4
+ (i % (8 * Policy::kPackedFactor)) % 2
+ (i / (8 * Policy::kPackedFactor)) % 2 * 2;
output_frag[i] = (Element)(output_frag_f[map_i] * alpha_);
}
}
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&output_frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * Policy::kStridedPerSTG;
fast_divmod(n, pq_rem, global_offset_row_ + accum_m, pq, pq_mul, pq_shr);
LongIndex offset_m = n * stride_n + k_offset + pq_rem * InterleavedN;
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
int accum_n = mma_n * InterleavedN;
int idx = mma_n + mma_m * Policy::MmaIterations::kColumn;
if((global_offset_row_ + accum_m < extent_row) && (global_offset_col_ + accum_n < extent_col)) {
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
offset_m + mma_n * k_offset_delta);
access_ptr[0] = frag_ptr[idx];
}
}
}
} else {
if(InstructionShape::kM == Policy::kStridedPerSTG) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < frag.size(); ++i) {
output_frag_f[i] = frag[i];
}
} else {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < frag.size(); ++i) {
int map_i = (i / (16 * Policy::kPackedFactor)) * (16 * Policy::kPackedFactor)
+ (i % (8 * Policy::kPackedFactor)) / 2 * 4
+ (i % (8 * Policy::kPackedFactor)) % 2
+ (i / (8 * Policy::kPackedFactor)) % 2 * 2;
output_frag_f[i] = frag[map_i];
}
}
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&output_frag);
Array<Element, kElementsPerAccess> ref_frag;
AccessType *ref_frag_ptr = reinterpret_cast<AccessType *>(&ref_frag);
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int accum_m = mma_m * Policy::kStridedPerSTG;
fast_divmod(n, pq_rem, global_offset_row_ + accum_m, pq, pq_mul, pq_shr);
LongIndex offset_m = n * stride_n + k_offset + pq_rem * InterleavedN;
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
int accum_n = mma_n * InterleavedN;
int idx = mma_n + mma_m * Policy::MmaIterations::kColumn;
if((global_offset_row_ + accum_m < extent_row) && (global_offset_col_ + accum_n < extent_col)) {
AccessType* access_ptr = reinterpret_cast<AccessType *>(offset_ref.data() +
offset_m + mma_n * k_offset_delta);
ref_frag_ptr[0] = access_ptr[0];
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < kElementsPerAccess; ++i) {
output_frag[idx * kElementsPerAccess + i] = Element(alpha_ * output_frag_f[idx * kElementsPerAccess + i]
+ beta_ * ref_frag[i]);
}
access_ptr[0] = frag_ptr[idx];
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                       ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
  /// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 136,033 | C | 33.153653 | 118 | 0.650622 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines iterators to load sparse metadata used by warp-level matrix multiply operations
targeting Sparse Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
int OpDelta_,
/// Number of threads participating in one matrix operation
int Threads,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class SparseMmaTensorOpMetaTileIterator {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = Layout_;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
static int const kSparse = 2;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kColumn % InstructionShape::kColumn),
"Shape of warp-level Mma must be divisible by operator shape.");
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
// Determine number of elements along outer dimension per individual LDSM op
static int const kLdsmOpOuter = InstructionShape::kColumn;
static int const kLdsmOpInner = 8 * kElementsPerAccess / kLdsmOpOuter;
static_assert(!(Shape::kColumn % kLdsmOpOuter),
"Shape of warp-level mma must be divisible by LDSM's "
"fundamental tile size.");
static_assert(!(Shape::kRow % kLdsmOpInner),
"Shape of warp-level mma must be divisible by LDSM's "
"fundamental tile size.");
/// Shape of one individual LDSM instruction
static int const LdsmShapeColumn =
InstructionShape::kColumn / kLdsmOpOuter;
static int const LdsmShapeRow =
((4 / LdsmShapeColumn * kLdsmOpInner) > Shape::kRow)
? (Shape::kRow / kLdsmOpInner)
: (4 / LdsmShapeColumn);
using LdsmShape =
layout::PitchLinearShape<LdsmShapeRow, LdsmShapeColumn>;
/// Number and arrangement of LDSM instructions
using LdsmIterations = layout::PitchLinearShape<
Shape::kRow / kLdsmOpInner / LdsmShapeRow,
1>;
/// Number of groups for each tile
static int const kGroupsPerTile =
Shape::kColumn / InstructionShape::kColumn;
};
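  // For illustration (assuming 16-bit metadata elements and a 16-column instruction
  // shape, neither fixed by this template): kElementsPerAccess = 128 / 16 = 8,
  // kLdsmOpOuter = 16, and kLdsmOpInner = 8 * 8 / 16 = 4; LdsmShape and
  // LdsmIterations then describe how many LDSM issues are needed to cover
  // Shape::kRow rows of the metadata tile.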
private:
  /// Alternative OpDelta arrangements are not implemented at present.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = Array<Element, Policy::kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kRow * InstructionShape::kColumn / kThreads>;
private:
/// Layout object storing stride values
Index stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter used to determine when to increment byte offset and when
/// to XOR it
int k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
SparseMmaTensorOpMetaTileIterator()
: pointer_(nullptr),
stride_(0),
byte_offset_(0),
k_group_idx_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator(TensorRef const &ref, int lane_id)
: pointer_(reinterpret_cast<AccessType const *>(ref.data())),
stride_(ref.stride(0) / Policy::kElementsPerAccess),
byte_offset_(0),
k_group_idx_(0) {
int access_contiguous = (lane_id % (Shape::kRow / Policy::kElementsPerAccess));
int access_strided = (lane_id / (Shape::kRow / Policy::kElementsPerAccess));
byte_offset_ = (access_contiguous + access_strided * stride_) *
sizeof_bits<Element>::value * Policy::kElementsPerAccess / 8;
}
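  // For illustration (assuming 16-bit metadata elements and Shape::kRow == 64,
  // both hypothetical): Policy::kElementsPerAccess == 8, so lanes are split into
  // groups of 64 / 8 == 8 along the contiguous dimension; lane 10 then gets
  // access_contiguous == 2, access_strided == 1, and
  // byte_offset_ == (2 + 1 * stride_) * 16 bytes.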
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof_bits<Element>::value / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int offset = tile_offset.row() * Shape::kRow +
tile_offset.column() * InstructionShape::kColumn * stride_ *
Policy::kElementsPerAccess;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator &operator++() {
add_tile_offset({0, 1});
if (kPartitionsK > 1) {
++k_group_idx_;
// Jump to next stage
if (k_group_idx_ == Policy::kGroupsPerTile) {
k_group_idx_ = 0;
add_tile_offset(
{0, ((kPartitionsK - 1) * Policy::kGroupsPerTile)});
}
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
  SparseMmaTensorOpMetaTileIterator &operator--(){
    byte_offset_ -= stride_ * InstructionShape::kColumn *
                    sizeof_bits<Element>::value * Policy::kElementsPerAccess /
                    8;
    return *this;
  }
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE SparseMmaTensorOpMetaTileIterator &
operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
SparseMmaTensorOpMetaTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, Policy::LdsmShape::kCount> *fetch_ptr =
reinterpret_cast<Array<unsigned, Policy::LdsmShape::kCount> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::LdsmIterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
AccessType const *source_ptr =
pointer_ +
Policy::LdsmShape::kContiguous * Policy::kLdsmOpInner * c +
Policy::LdsmShape::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) +
byte_offset + byte_offset_;
cutlass::arch::ldsm<layout::RowMajor, Policy::LdsmShape::kCount>(
fetch_ptr[access_idx], source_byte_ptr);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kRow / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kColumn * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no op
}
};
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 13,151 | C | 33.519685 | 100 | 0.660862 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
enum class TensorFloat32Op {
k3xTF32,
k4xTF32
};
template <
/// Floating-point rounding style
FloatRoundStyle RoundBigA_,
/// Floating-point rounding style
FloatRoundStyle RoundSmallA_,
/// Floating-point rounding style
FloatRoundStyle RoundBigB_ = RoundBigA_,
/// Floating-point rounding style
FloatRoundStyle RoundSmallB_ = RoundSmallA_,
/// Precision for TensorFloat32Op
// (k3xTF32: BigxBig, BigxSmall, SmallxBig)
// (k4xTF32: BigxBig, BigxSmall, SmallxBig, SmallxSmall)
TensorFloat32Op Precision_ = TensorFloat32Op::k3xTF32
>
struct FastF32 {
static FloatRoundStyle const kRoundBigA = RoundBigA_;
static FloatRoundStyle const kRoundSmallA = RoundSmallA_;
static FloatRoundStyle const kRoundBigB = RoundBigB_;
static FloatRoundStyle const kRoundSmallB = RoundSmallB_;
static TensorFloat32Op const kPrecision = Precision_;
};
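// For illustration: FastF32 emulates an FP32 product with TF32 tensor-core MMAs by
// splitting each float x into a "big" tfloat32 part and a "small" residual,
// x ~= x_big + x_small, so that
//   a * b ~= a_big*b_big + a_big*b_small + a_small*b_big                   (k3xTF32)
//   a * b ~= a_big*b_big + a_big*b_small + a_small*b_big + a_small*b_small (k4xTF32)
// Each partial product maps to one TF32 MMA; dropping the small*small term
// (k3xTF32) saves one MMA at a small cost in accuracy.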
namespace detail {
template<
int N,
FloatRoundStyle RoundBig = FloatRoundStyle::round_toward_zero,
FloatRoundStyle RoundSmall = FloatRoundStyle::round_half_ulp_truncate
>
struct ConvertAndPackAccurateF32 {
/// Rounding styles for big and small part
static FloatRoundStyle const kRoundBig = RoundBig;
static FloatRoundStyle const kRoundSmall = RoundSmall;
/// Converter type
using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>;
  /// Source fragment
using SourceFragment = Array<float, N>;
/// Destination fragment
using DestinationFragment = Array<tfloat32_t, N>;
/// Converter Fragment holding two tfloat32_t elements for every float
using ConverterFragment = Array<tfloat32_t, 2>;
  /// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
CUTLASS_HOST_DEVICE
void operator()(SourceFragment const &source,
DestinationFragment &dst_big,
DestinationFragment &dst_small) {
Converter convert_;
ConverterFragment result_;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
// convert source to result fragment
result_ = convert_(source[i]);
      // store the converted result into the big and small destination fragments
dst_big[i] = result_[kBigIndex];
dst_small[i] = result_[kSmallIndex];
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaTensorOpFastF32;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float*float+float => float using TF32 TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Used for partial specialization
typename Enable
>
class MmaTensorOpFastF32<
Shape_,
float, LayoutA_,
float, LayoutB_,
float, LayoutC_,
Policy_, PartitionsK_,
AccumulatorsInRowMajor, Enable> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = float;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = float;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = float;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddFastF32;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
  /// Tune the F32-to-TF32 big/small conversion for float operations.
  /// Different combinations of big/small rounding trade speed against accuracy.
  /// Generally, round_half_ulp_truncate improves performance but hurts accuracy.
using MmaFastF32 = FastF32 <
FloatRoundStyle::round_toward_zero, // kRoundBigA
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallA
FloatRoundStyle::round_toward_zero, // kRoundBigB
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallB
TensorFloat32Op::k3xTF32 // Number of TF32 operations
>;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
kThreadCount,
kPartitionsK
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements * 2>;
/// Fragment bisecting big and small sections
using AccessTypeFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kRow,
kThreadCount,
kPartitionsK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements * 2>;
/// Fragment bisecting big and small sections
using AccessTypeFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>;
  /// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename ArchMmaOperator::Shape, typename Policy::OpDelta>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM,
(Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN
>;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaTensorOpFastF32() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
AccessTypeFragmentA const *ptr_A = reinterpret_cast<AccessTypeFragmentA const*>(&A);
AccessTypeFragmentB const *ptr_B = reinterpret_cast<AccessTypeFragmentB const*>(&B);
//
// Accumulate in place
//
D = C;
mma_operator(D, ptr_A[kSmallIndex], ptr_B[kBigIndex], D);
mma_operator(D, ptr_A[kBigIndex], ptr_B[kSmallIndex], D);
mma_operator(D, ptr_A[kBigIndex], ptr_B[kBigIndex], D);
if (MmaFastF32::kPrecision == TensorFloat32Op::k4xTF32)
mma_operator(D, ptr_A[kSmallIndex], ptr_B[kSmallIndex], D);
}
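  // Note on the ordering above: the two mixed correction products are accumulated
  // first, then the dominant big*big product, and (for k4xTF32 only) the
  // small*small product last; the precision setting only controls whether that
  // final term is issued.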
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void mma_operator(
FragmentC &D,
AccessTypeFragmentA const &A,
AccessTypeFragmentB const &B,
FragmentC const &C
) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
// Serpentine visitation order maximizing reuse of Ra
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
        // This allows reuse of Rb at serpentine turns
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
if (AccumulatorsInRowMajor) { // matrix B is reordered
mma(
ptr_D[n_serpentine + m * MmaIterations::kColumn],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[n_serpentine + m * MmaIterations::kColumn]);
} else {
mma(
ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow]);
}
} // end n loop
} // end m loop
#else
assert(0);
#endif
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//
// Define conversions from source type to instruction type
//
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
detail::ConvertAndPackAccurateF32<
FragmentA::kElements / 2,
MmaFastF32::kRoundBigA,
MmaFastF32::kRoundSmallA> convert_A;
detail::ConvertAndPackAccurateF32<
FragmentB::kElements,
MmaFastF32::kRoundBigB,
MmaFastF32::kRoundSmallB> convert_B;
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements> *ptr_dst_B =
reinterpret_cast<Array<typename ArchMmaOperator::ElementB, FragmentB::kElements> *>(&dst_B);
convert_B(B, ptr_dst_B[0], ptr_dst_B[1]);
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *ptr_dst_A =
reinterpret_cast<Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *>(&dst_A);
Array<ElementA, FragmentA::kElements / 2> const *ptr_A =
reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A);
convert_A(ptr_A[0], ptr_dst_A[0], ptr_dst_A[2]);
convert_A(ptr_A[1], ptr_dst_A[1], ptr_dst_A[3]);
#else
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,721 | C | 32.309322 | 104 | 0.658291 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/thread/mma_sm50.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles all packed matrix layouts
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: layout::MapFunc)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: layout::MapFunc)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: layout::MapFunc)
typename LayoutC_,
/// Operator used to compute GEMM
typename Operator_
>
struct MmaGeneric {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = LayoutA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = LayoutB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = Operator_;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Instruction
using MmaOp = arch::Mma<
gemm::GemmShape<1,1,1>,
1,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
Operator>;
static bool const kMultipleOf2 = ((Shape::kM % 2 == 0) && (Shape::kN % 2 == 0));
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementA const, LayoutA> a_ref(
reinterpret_cast<ElementA const *>(&A), LayoutA::packed({Shape::kM, Shape::kK}));
TensorRef<ElementB const, LayoutB> b_ref(
reinterpret_cast<ElementB const *>(&B), LayoutB::packed({Shape::kK, Shape::kN}));
TensorRef<ElementC, LayoutC> d_ref(
reinterpret_cast<ElementC *>(&D), LayoutC::packed(make_Coord(Shape::kM, Shape::kN)));
MmaOp mma_op;
// Copy accumulators
D = C;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK; ++k) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 860)
if (kMultipleOf2 &&
platform::is_same<ElementA, float>::value &&
platform::is_same<ElementB, float>::value &&
platform::is_same<ElementC, float>::value) {
        // 2x2 zigzag: the m and n loops increment by 2; the inner body performs 4 multiply-adds over a 2x2 tile.
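        // For illustration, with Shape::kM == 8: column pairs with n % 4 == 0 visit
        // rows m_serpentine = 0, 2, 4, 6 top-down, while pairs with n % 4 == 2
        // visit 6, 4, 2, 0 bottom-up, so the tiles on either side of a turn reuse
        // the same A rows.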
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; n+=2) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; m+=2) {
int m_serpentine = (n % 4) ? (Shape::kM - 2 - m) : m;
//top-left element in 2x2 tile
{
MatrixCoord mn(m_serpentine, n);
MatrixCoord mk(m_serpentine, k);
MatrixCoord kn(k, n);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
//bottom-left element in 2x2 tile
{
MatrixCoord mn(m_serpentine+1, n);
MatrixCoord mk(m_serpentine+1, k);
MatrixCoord kn(k, n);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
//bottom-right element in 2x2 tile
{
MatrixCoord mn(m_serpentine+1, n+1);
MatrixCoord mk(m_serpentine+1, k);
MatrixCoord kn(k, n+1);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
//top-right element in 2x2 tile
{
MatrixCoord mn(m_serpentine, n+1);
MatrixCoord mk(m_serpentine, k);
MatrixCoord kn(k, n+1);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
}
}
} else
#endif
{
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
int m_serpentine = (n % 2) ? (Shape::kM - 1 - m) : m;
MatrixCoord mn(m_serpentine, n);
MatrixCoord mk(m_serpentine, k);
MatrixCoord kn(k, n);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
mma_op(d, a, b, d);
d_ref.at(mn) = d[0];
}
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Matrix multiply-add operation - assumes operand B is not changing
struct MmaComplexF32_Column {
using Shape = gemm::GemmShape<1, 1, 1>;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
}
};
/// Matrix multiply-add operation - assumes operand A is not changing
struct MmaComplexF32_Corner {
using Shape = gemm::GemmShape<1, 1, 1>;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
}
};
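// Note: both functors above are written for the call pattern mma(d, a, b, d) used
// below, in which c and d alias the same accumulator. Under that aliasing each
// functor computes the full complex multiply-add
//   d.real = a.real * b.real - a.imag * b.imag + c.real
//   d.imag = a.real * b.imag + a.imag * b.real + c.imag
// and the two variants only reorder the statements to match which operand stays
// constant, as their comments indicate.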
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles all packed matrix layouts
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: layout::MapFunc)
typename LayoutA_,
/// Layout of B matrix (concept: layout::MapFunc)
typename LayoutB_,
/// Layout of C matrix (concept: layout::MapFunc)
typename LayoutC_
>
struct MmaGeneric<
Shape_,
complex<float>,
LayoutA_,
complex<float>,
LayoutB_,
complex<float>,
LayoutC_,
arch::OpMultiplyAdd> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = complex<float>;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = LayoutA_;
/// Data type of operand B
using ElementB = complex<float>;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = LayoutB_;
/// Element type of operand C
using ElementC = complex<float>;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Instruction
using MmaOp = arch::Mma<
gemm::GemmShape<1,1,1>,
1,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
Operator>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementA const, LayoutA> a_ref(
reinterpret_cast<ElementA const *>(&A), LayoutA::packed({Shape::kM, Shape::kK}));
TensorRef<ElementB const, LayoutB> b_ref(
reinterpret_cast<ElementB const *>(&B), LayoutB::packed({Shape::kK, Shape::kN}));
TensorRef<ElementC, LayoutC> d_ref(
reinterpret_cast<ElementC *>(&D), LayoutC::packed(make_Coord(Shape::kM, Shape::kN)));
detail::MmaComplexF32_Column mma_column;
detail::MmaComplexF32_Corner mma_corner;
// Copy accumulators
D = C;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
int m_serpentine = (n % 2) ? (Shape::kM - 1 - m) : m;
MatrixCoord mn(m_serpentine, n);
MatrixCoord mk(m_serpentine, k);
MatrixCoord kn(k, n);
Array<ElementC, 1> d;
Array<ElementA, 1> a;
Array<ElementB, 1> b;
d[0] = d_ref.at(mn);
a[0] = a_ref.at(mk);
b[0] = b_ref.at(kn);
if ((m == 0 && n) || m == Shape::kM - 1) {
mma_corner(d, a, b, d);
}
else {
mma_column(d, a, b, d);
}
d_ref.at(mn) = d[0];
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for FFMA and DFMA GEMM
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: layout::MapFunc)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: layout::MapFunc)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: layout::MapFunc)
typename LayoutC_
>
struct Mma<
Shape_,
ElementA_,
LayoutA_,
ElementB_,
LayoutB_,
ElementC_,
LayoutC_,
arch::OpMultiplyAdd,
bool> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = LayoutA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = LayoutB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename MmaGeneric<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
Operator>::MmaOp;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
MmaGeneric<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
Operator> mma;
mma(D, A, B, C);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,373 | C | 27.47037 | 108 | 0.5429 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/thread/mma_sm60.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/functional.h"
#include "cutlass/reduction/thread/reduce.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Structure to compute the matrix product for HFMA
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Type of GEMM inner vs outer product
bool
>
struct Mma_HFMA2;
/////////////////////////////
// Specialization for NNN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::ColumnMajor,
layout::ColumnMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
    /// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp;
Array<half_t, 2> *ptr_tmp = &tmp;
ptr_tmp[0] = ptr_D[n*Shape::kM/2 + m];
mma(
tmp,
ptr_A[k*Shape::kM/2 + m],
ptr_B[n*Shape::kK + k],
tmp);
ptr_D[n*Shape::kM/2 + m] = ptr_tmp[0];
}
}
}
}
};
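// For illustration: the GemmShape<2,1,1> arch::Mma above corresponds to one paired
// half-precision multiply-add (HFMA2) that updates two adjacent rows of the
// column-major D at once, which is why FragmentA and FragmentC are reinterpreted
// as Array<half_t, 2> packs while FragmentB is read one half_t at a time.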
/////////////////////////////
// Specialization for NNT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::ColumnMajor,
layout::ColumnMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp;
Array<half_t, 2> *ptr_tmp = &tmp;
ptr_tmp[0] = ptr_D[m*Shape::kN/2 + n];
Array<half_t, 2> tmp_B;
tmp_B[0] = ptr_B->at(2*n*Shape::kK + k);
tmp_B[1] = ptr_B->at((2*n+1)*Shape::kK + k);
mma(
tmp,
ptr_A[k*Shape::kM + m],
tmp_B,
tmp);
ptr_D[m*Shape::kN/2 + n] = ptr_tmp[0];
}
}
}
}
};
/////////////////////////////
// Specialization for NTN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::ColumnMajor,
layout::RowMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the GEMM M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
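    /// Use 2x1x1 HFMA2 sequence for bulk of computation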
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / Mma::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM / Mma::Shape::kM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN / Mma::Shape::kN; ++n) {
Array<half_t, 2> tmp;
Array<half_t, 2> *ptr_tmp = &tmp;
ptr_tmp[0] = ptr_D[m + n * Shape::kM/2];
mma(
tmp,
ptr_A[m + k * Shape::kM/2],
ptr_B[k * Shape::kN + n],
tmp);
ptr_D[m + n * Shape::kM/2] = ptr_tmp[0];
}
}
}
}
};
/////////////////////////////
// Specialization for NTT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::ColumnMajor,
layout::RowMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp;
Array<half_t, 2> *ptr_tmp = &tmp;
ptr_tmp[0] = ptr_D[m*Shape::kN/2 + n];
mma(
tmp,
ptr_A[k*Shape::kM + m],
ptr_B[k*Shape::kN/2 + n],
tmp);
ptr_D[m*Shape::kN/2 + n] = ptr_tmp[0];
}
}
}
}
};
/////////////////////////////
// Specialization for TNN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::ColumnMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
    /// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp;
Array<half_t, 2> *ptr_tmp = &tmp;
ptr_tmp[0] = ptr_D[n*Shape::kM/2 + m];
Array<half_t, 2> tmp_A;
tmp_A[0] = ptr_A->at(2*m*Shape::kK + k);
tmp_A[1] = ptr_A->at((2*m+1)*Shape::kK + k);
mma(
tmp,
tmp_A,
ptr_B[n*Shape::kK + k],
tmp);
ptr_D[n*Shape::kM/2 + m] = ptr_tmp[0];
}
}
}
}
};
/////////////////////////////
// Specialization for TNT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::ColumnMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp;
Array<half_t, 2> *ptr_tmp = &tmp;
ptr_tmp[0] = ptr_D[m*Shape::kN/2 + n];
Array<half_t, 2> tmp_B;
tmp_B[0] = ptr_B->at(2*n*Shape::kK + k);
tmp_B[1] = ptr_B->at((2*n+1)*Shape::kK + k);
mma(
tmp,
ptr_A[m*Shape::kK + k],
tmp_B,
tmp);
ptr_D[m*Shape::kN/2 + n] = ptr_tmp[0];
}
}
}
}
};
/////////////////////////////
// Specialization for TTN //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2 <
Shape_,
layout::RowMajor,
layout::RowMajor,
layout::ColumnMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kM % 2),
"Mma_HFMA2 requires the M dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
    /// Use 2x1x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 1> const *ptr_B = reinterpret_cast<Array<half_t, 1> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
Array<half_t, 2> tmp;
Array<half_t, 2> *ptr_tmp = &tmp;
ptr_tmp[0] = ptr_D[n*Shape::kM/2 + m];
Array<half_t, 2> tmp_A;
tmp_A[0] = ptr_A->at(2*m*Shape::kK + k);
tmp_A[1] = ptr_A->at((2*m+1)*Shape::kK + k);
mma(
tmp,
tmp_A,
ptr_B[k*Shape::kN + n],
tmp);
ptr_D[n*Shape::kM/2 + m] = ptr_tmp[0];
}
}
}
}
};
/////////////////////////////
// Specialization for TTT //
/////////////////////////////
template <typename Shape_>
struct Mma_HFMA2<
Shape_,
layout::RowMajor,
layout::RowMajor,
layout::RowMajor,
true
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kN % 2),
"Mma_HFMA2 requires the N dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x2x1 HFMA2 sequence for bulk of computation
using Mma = arch::Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd>;
Array<half_t, 2> *ptr_D = reinterpret_cast<Array<half_t, 2> *>(&D);
Array<half_t, 1> const *ptr_A = reinterpret_cast<Array<half_t, 1> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
Mma mma;
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / Mma::Shape::kK; k++){
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / Mma::Shape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / Mma::Shape::kM; m++){
Array<half_t, 2> tmp;
Array<half_t, 2> *ptr_tmp = &tmp;
ptr_tmp[0] = ptr_D[m*Shape::kN/2 + n];
mma(
tmp,
ptr_A[m*Shape::kK + k],
ptr_B[k*Shape::kN/2 + n],
tmp);
ptr_D[m*Shape::kN/2 + n] = ptr_tmp[0];
}
}
}
}
};
/////////////////////////////////////////////////////////////////////
// Specialization for TNT + Inner Product or 1x1x2K + LayoutC = T //
/////////////////////////////////////////////////////////////////////
template <typename Shape_, typename LayoutA, typename LayoutB>
struct Mma_HFMA2<
Shape_,
LayoutA,
LayoutB,
layout::RowMajor,
false
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kK % 2),
"Mma_HFMA2 requires the K dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x1x2 HFMA2 sequence for bulk of computation
using GemmShape = gemm::GemmShape<1,1,2>;
Array<half_t, 1> *ptr_D = reinterpret_cast<Array<half_t, 1> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
// Inner product is calculated using MACs, followed by final reduction
multiply_add<Array<half_t, 2>> mac;
cutlass::reduction::thread::Reduce< plus<half_t>, Array<half_t, 2> > reduce;
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / GemmShape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / GemmShape::kM; m++){
Array<half_t, 2> tmp_C;
tmp_C.clear();
Array<half_t, 1> *ptr_tmp_C = reinterpret_cast<Array<half_t, 1> *>(&tmp_C);
ptr_tmp_C[0] = ptr_D[m*Shape::kN + n];  // index row-major D consistently with the store below
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / GemmShape::kK; k++){
tmp_C = mac(ptr_A[m*Shape::kK/2 + k], ptr_B[n*Shape::kK/2 + k], tmp_C);
}
Array<half_t, 1> res;
Array<half_t, 1> *ptr_res = &res;
res = reduce(tmp_C);
ptr_D[m*Shape::kN + n] = ptr_res[0];
}
}
}
};
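// Worked example of the inner-product path above (the tile size is hypothetical
// and chosen only for illustration): with Shape::kK == 4, each output element
// (m, n) is accumulated as paired half2 MACs followed by a plus-reduction:
//
//   tmp_C = { D(m,n), 0 }
//   tmp_C = { a0*b0 + tmp_C[0], a1*b1 + tmp_C[1] }   // k == 0 covers K-indices 0,1
//   tmp_C = { a2*b2 + tmp_C[0], a3*b3 + tmp_C[1] }   // k == 1 covers K-indices 2,3
//   D(m,n) = tmp_C[0] + tmp_C[1]
//
// i.e. the even and odd halves of the K dimension accumulate in the two lanes
// of an HFMA2 and are combined once at the end.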
/////////////////////////////////////////////////////////////////////
// Specialization for TNN + Inner Product or 1x1x2K + LayoutC = N //
/////////////////////////////////////////////////////////////////////
template <typename Shape_, typename LayoutA, typename LayoutB>
struct Mma_HFMA2<
Shape_,
LayoutA,
LayoutB,
layout::ColumnMajor,
false
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// A operand storage
using FragmentA = Array<half_t, Shape::kMK>;
/// B operand storage
using FragmentB = Array<half_t, Shape::kKN>;
/// C operand storage
using FragmentC = Array<half_t, Shape::kMN>;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
static_assert(
!(Shape::kK % 2),
"Mma_HFMA2 requires the K dimension to be divisible by 2."
);
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
/// Initialize output with input
D = C;
/// Use 1x1x2 HFMA2 sequence for bulk of computation
using GemmShape = gemm::GemmShape<1,1,2>;
Array<half_t, 1> *ptr_D = reinterpret_cast<Array<half_t, 1> *>(&D);
Array<half_t, 2> const *ptr_A = reinterpret_cast<Array<half_t, 2> const *>(&A);
Array<half_t, 2> const *ptr_B = reinterpret_cast<Array<half_t, 2> const *>(&B);
// Inner product is calculated using MACs, followed by final reduction
multiply_add<Array<half_t, 2>> mac;
cutlass::reduction::thread::Reduce< plus<half_t>, Array<half_t, 2> > reduce;
CUTLASS_PRAGMA_UNROLL
for(auto n=0; n < Shape::kN / GemmShape::kN; n++){
CUTLASS_PRAGMA_UNROLL
for(auto m=0; m < Shape::kM / GemmShape::kM; m++){
Array<half_t, 2> tmp_C;
tmp_C.clear();
Array<half_t, 1> *ptr_tmp_C = reinterpret_cast<Array<half_t, 1> *>(&tmp_C);
ptr_tmp_C[0] = ptr_D[n*Shape::kM + m];
CUTLASS_PRAGMA_UNROLL
for(auto k=0; k < Shape::kK / GemmShape::kK; k++){
tmp_C = mac(ptr_A[m*Shape::kK/2 + k], ptr_B[n*Shape::kK/2 + k], tmp_C);
}
Array<half_t, 1> res;
Array<half_t, 1> *ptr_res = &res;
res = reduce(tmp_C);
ptr_D[n*Shape::kM + m] = ptr_res[0];
}
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_, typename LayoutA, typename LayoutB, typename LayoutC
>
struct Mma<
Shape_,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
LayoutC,
arch::OpMultiplyAdd
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = half_t;
/// Data type of operand B
using ElementB = half_t;
/// Element type of operand C
using ElementC = half_t;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
static bool const a_row_major = platform::is_same< LayoutA, layout::RowMajor>::value;
static bool const b_column_major = platform::is_same< LayoutB, layout::ColumnMajor>::value;
static bool const c_row_major = platform::is_same< LayoutC, layout::RowMajor>::value;
static bool const c_column_major = platform::is_same< LayoutC, layout::ColumnMajor>::value;
static bool const m_mod2 = !(Shape::kM % 2);
static bool const n_mod2 = !(Shape::kN % 2);
static bool const k_mod2 = !(Shape::kK % 2);
// HFMA based MMA optimizations are of 2 types :
// 1. Inner product
// 2. Outer product
// The variant is chosen based on LayoutC (for outer-product GEMMs), or based on
// LayoutA and LayoutB or a 1x1x2K problem shape (for inner-product GEMMs).
// If neither condition holds, we fall back to the generic MMA.
static bool const use_outer_prod = (c_column_major && m_mod2) || (c_row_major && n_mod2);
static bool const use_inner_prod = (a_row_major && b_column_major && k_mod2) || (Shape::kM==1 && Shape::kN==1 && k_mod2);
static bool const use_optimized = (use_outer_prod || use_inner_prod);
using ArchMmaOperator = typename platform::conditional< use_optimized,
detail::Mma_HFMA2<Shape, LayoutA, LayoutB, LayoutC, use_outer_prod>,
MmaGeneric <Shape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator>
>::type;
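// Illustrative dispatch examples (the shapes below are hypothetical and shown
// only to demonstrate how ArchMmaOperator is selected by the traits above):
//   - Shape = GemmShape<8,8,4>, LayoutC = ColumnMajor : m_mod2 holds, so the
//     outer-product Mma_HFMA2 specialization is chosen.
//   - Shape = GemmShape<1,1,8> : kM == kN == 1 and K is even, so the
//     inner-product Mma_HFMA2 specialization (use_outer_prod == false) is chosen.
//   - Shape = GemmShape<3,3,3> : no divisibility condition holds, so the
//     MmaGeneric fallback is chosen.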
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
ArchMmaOperator mma;
mma(D, A, B, C);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Determines whether to enable thread::Gemm<> specializations compatible with SM60
template <
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB>
struct EnableMma_Crow_SM60 {
static bool const kIsConventionalLayout =
(platform::is_same<LayoutA, layout::RowMajor>::value ||
platform::is_same<LayoutA, layout::ColumnMajor>::value) &&
(platform::is_same<LayoutB, layout::RowMajor>::value ||
platform::is_same<LayoutB, layout::ColumnMajor>::value);
static bool const value = kIsConventionalLayout;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes matrix product when C is row-major
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
typename LayoutA_,
typename LayoutB_
>
struct Mma<
Shape_,
half_t,
LayoutA_,
half_t,
LayoutB_,
half_t,
layout::RowMajor,
arch::OpMultiplyAdd,
typename platform::enable_if<detail::EnableMma_Crow_SM60<
LayoutA_,
LayoutB_
>::value>::type>{
using Shape = Shape_;
using ElementA = half_t;
using LayoutA = LayoutA_;
using ElementB = half_t;
using LayoutB = LayoutB_;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using Operator = arch::OpMultiplyAdd;
using TransposeMma = Mma<
GemmShapeTranspose<Shape>,
half_t,
typename layout::LayoutTranspose<LayoutB>::type,
half_t,
typename layout::LayoutTranspose<LayoutA>::type,
half_t,
layout::ColumnMajor,
arch::OpMultiplyAdd,
bool>;
using FragmentA = Array<ElementA, Shape::kMK>;
using FragmentB = Array<ElementB, Shape::kKN>;
using FragmentC = Array<ElementC, Shape::kMN>;
using ArchMmaOperator = typename TransposeMma::ArchMmaOperator;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TransposeMma mma;
mma(D, B, A, C);
}
};
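// Note on the specialization above: a row-major C output is computed by
// transposing the problem, using the identity (A * B)^T = B^T * A^T. The
// transposed problem has a column-major C, so it can reuse the specializations
// defined earlier; this is why operator() forwards the operands in swapped
// order as mma(D, B, A, C).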
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 29,987 | C | 24.435114 | 123 | 0.557208 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/thread/mma_sm61.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for IDP4A
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_
>
struct Mma<
Shape_,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int32_t,
LayoutC_,
arch::OpMultiplyAdd,
bool> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = int8_t;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = layout::RowMajor;
/// Data type of operand B
using ElementB = int8_t;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = layout::ColumnMajor;
/// Element type of operand C
using ElementC = int32_t;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
// Use 1x1x4 IDP4A sequence for bulk of computation
using ArchMmaOperator = arch::Mma<
gemm::GemmShape<1,1,4>,
1,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementC, LayoutC> d(
reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
// Copy accumulators
D = C;
/// Use 1x1x4 IDP4A sequence for bulk of computation
ArchMmaOperator mma;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / ArchMmaOperator::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
MatrixCoord mn(m, n);
Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
mma(
tmp,
ptr_A[m * Shape::kK / ArchMmaOperator::Shape::kK + k],
ptr_B[n * Shape::kK / ArchMmaOperator::Shape::kK + k],
tmp);
d.at(mn) = reinterpret_cast<int32_t &>(tmp);
}
}
}
}
};
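// Illustrative usage sketch (the alias name and tile shape below are
// hypothetical; they are chosen only to show which specialization is picked):
// an int8 x int8 -> int32 thread-level product with row-major A and
// column-major B selects this IDP4A-based operator:
//
//   using ExampleThreadMma = cutlass::gemm::thread::Mma<
//       cutlass::gemm::GemmShape<4, 4, 16>,   // kK should be a multiple of 4
//       int8_t,  cutlass::layout::RowMajor,
//       int8_t,  cutlass::layout::ColumnMajor,
//       int32_t, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   ExampleThreadMma::FragmentC d, c;
//   ExampleThreadMma::FragmentA a;
//   ExampleThreadMma::FragmentB b;
//   ExampleThreadMma mma;
//   mma(d, a, b, c);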
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles conventional layouts for IDP4A
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_
>
struct Mma<
Shape_,
int8_t,
layout::ColumnMajor,
int8_t,
layout::RowMajor,
int32_t,
LayoutC_,
arch::OpMultiplyAdd,
int8_t> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = int8_t;
/// Layout of A matrix (concept: layout::MapFunc)
using LayoutA = layout::ColumnMajor;
/// Data type of operand B
using ElementB = int8_t;
/// Layout of B matrix (concept: layout::MapFunc)
using LayoutB = layout::RowMajor;
/// Element type of operand C
using ElementC = int32_t;
/// Layout of C matrix (concept: layout::MapFunc)
using LayoutC = LayoutC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMK>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kKN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Underlying matrix multiply operator (concept: arch::Mma)
/// Use 1x1x4 IDP4A sequence for bulk of computation
using ArchMmaOperator = arch::Mma<
gemm::GemmShape<1,1,4>,
1,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
TensorRef<ElementC, LayoutC> d(
reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
// Copy accumulators
D = C;
/// Underlying matrix multiply operator
ArchMmaOperator mma;
Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Shape::kK / ArchMmaOperator::Shape::kK; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
MatrixCoord mn(m, n);
Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
mma(
tmp,
ptr_A[m + k * Shape::kM],
ptr_B[n + k * Shape::kN],
tmp);
d.at(mn) = reinterpret_cast<int32_t &>(tmp);
}
}
}
}
};
} // namespace thread
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,142 | C | 27.57193 | 100 | 0.599116 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/mma_sparse_base.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Policy object describing MmaTensorOp
template <
/// Warp-level GEMM operator (concept: gemm::warp::Mma)
typename Operator_,
/// Padding used for A operand in shared memory (concept: MatrixShape)
typename SmemPaddingA_,
/// Padding used for B operand in shared memory (concept: MatrixShape)
typename SmemPaddingB_,
/// Padding used for E operand in shared memory (concept: MatrixShape)
typename SmemPaddingE_,
/// Number of partitions of K dimension of GEMM
int PartitionsK = 1>
struct SparseMmaPolicy {
/// Warp-level GEMM operator (concept: gemm::warp::MmaTensorOp or gemm::warp::MmaSimt)
using Operator = Operator_;
/// Padding used for A operand in shared memory
using SmemPaddingA = SmemPaddingA_;
/// Padding used for B operand in shared memory
using SmemPaddingB = SmemPaddingB_;
/// Padding used for E operand in shared memory
using SmemPaddingE = SmemPaddingE_;
/// Number of partitions of K dimension
static int const kPartitionsK = PartitionsK;
};
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class SparseMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy = Policy_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
/// Number of stages
static int const kStages = Stages;
static int const kSparse = Operator::kSparse;
static int const kElementsPerElementE = Operator::kElementsPerElementE;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
/// Tensor reference to the E operand
using TensorRefE = TensorRef<typename Operator::ElementE, typename Operator::LayoutE>;
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK / kSparse * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
/// Shape of the E matrix operand in shared memory
using ShapeE =
MatrixShape<Shape::kM * 2 + Policy::SmemPaddingE::kRow,
Shape::kK / kSparse / kElementsPerElementE / 2 * kStages +
Policy::SmemPaddingE::kColumn>;
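// Illustrative sizing example (all parameter values here are hypothetical):
// for Shape = 128x128x64, kStages == 3, kSparse == 2, kElementsPerElementE == 4,
// and zero padding, the shapes above evaluate to
//   ShapeA = 128 x (64/2 * 3)         = 128 x 96   elements of ElementA
//   ShapeB = (64 * 3) x 128           = 192 x 128  elements of ElementB
//   ShapeE = (128*2) x (64/2/4/2 * 3) = 256 x 12   elements of ElementE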
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
/// Buffer for E operand
AlignedBuffer<typename Operator::ElementE, ShapeE::kCount> operand_E;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a layout object for the E matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutE LayoutE() {
return Operator::LayoutE::packed({ShapeE::kRow, ShapeE::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
/// Returns a TensorRef to the E operand
CUTLASS_HOST_DEVICE
TensorRefE operand_E_ref() {
return TensorRefE{operand_E.data(), LayoutE()};
}
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
/// Iterator to load a warp-scoped tile of E operand from shared memory
typename Operator::IteratorE warp_tile_iterator_E_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
SparseMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx),
warp_tile_iterator_E_(shared_storage.operand_E_ref(), lane_idx) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,210 | C | 32.616788 | 100 | 0.638328 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_with_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/gemm/threadblock/default_mma_core_with_reduction.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Operator class tag
typename OperatorClass,
///
bool ReduceKForA_,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Use zfill or predicate for SM80 out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone
>
struct DefaultMmaWithReduction {
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * kAlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * kAlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
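// Illustrative example of the cache-operation selection above (the element type
// and alignments are hypothetical): for ElementA = half_t (16 bits) with
// kAlignmentA == 8, each access is 16 * 8 == 128 bits, so CacheOpA resolves to
// CacheOperation::Global; with kAlignmentA == 4 the access is only 64 bits and
// CacheOpA falls back to CacheOperation::Always.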
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaWithReductionCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
ReduceKForA_, Stages, Operator, false, CacheOpA, CacheOpB>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaWithReductionMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages, SharedMemoryClear>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 6,323 | C | 43.535211 | 102 | 0.667721 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp
instructions.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/default_mma_with_reduction_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/gemm/threadblock/mma_with_reduction_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Template defining default matrix multiply operators inferred from threadblock tile size,
/// global memory data layout, and target math instruction.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape_,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Reduce operand A or B along K dimension
bool ReduceKForA_,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false// (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DefaultMmaWithReductionCore {
using Base = DefaultMmaCore<Shape_,
WarpShape,
InstructionShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
OperatorClass,
Stages,
Operator,
AccumulatorsInRowMajor,
CacheOpA,
CacheOpB,
TransformA,
TransformB,
IsComplex>;
using Shape = Shape_;
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
using SmemIteratorA = typename Base::SmemIteratorA;
using SmemIteratorB = typename Base::SmemIteratorB;
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
using WarpCount = typename Base::WarpCount;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaWithReductionTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, ReduceKForA_, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 7,387 | C | 42.97619 | 100 | 0.642074 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/mma_multistage.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaMultistage :
public MmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
// Optional staged-accumulation (e.g., tf32x3 kernels) for improved numerical
// accuracy, where each mainloop iteration first accumulates into a temporary
// set of freshly-cleared accumulators, which are subsequently added to the
// final accumulator set.
static bool const kStagedAccumulation =
platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value ||
platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value;
};
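// Illustrative example of the constants above (the iteration counts are
// hypothetical): if AsyncCopyIterationsPerStageA == 4 and
// Base::kWarpGemmIterations == 8, then
//   kAccessesPerGroupA = (4 + 8 - 1) / 8 = 1
// i.e. each warp-level MMA step issues at most one cp.async for operand A, so
// one stage's copies are spread across the warp-tile steps of a single
// mainloop iteration.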
private:
// Structure encapsulating pipeline state live from one iteration to the next
struct PipeState {
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
/// Temporary accumulator to facilitate staged-accumulation
FragmentC tmp_accum_;
/// Pair of A fragments used to overlap shared memory loads and math instructions
WarpLoadedFragmentA warp_loaded_frag_A_[2];
WarpTransformedFragmentA warp_transformed_frag_A_[2];
/// Pair of B fragments used to overlap shared memory loads and math instructions
WarpLoadedFragmentB warp_loaded_frag_B_[2];
WarpTransformedFragmentB warp_transformed_frag_B_[2];
};
private:
//
// Data members
//
/// Warp-level MMA operator
Operator warp_mma_;
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
/// Shared memory write stage index
int smem_write_stage_idx_;
/// Shared memory read stage index
int smem_read_stage_idx_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx),
smem_write_stage_idx_(0),
smem_read_stage_idx_(0)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
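// Example decomposition (warp counts and index are hypothetical): for
// WarpCount = <2, 2, 2> and warp_idx == 5:
//   warp_idx_mn = 5 % 4 = 1, warp_idx_k = 5 / 4 = 1,
//   warp_idx_m  = 1 % 2 = 1, warp_idx_n = 1 / 2 = 0
// so warp 5 owns the (m, n, k) = (1, 0, 1) warp tile.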
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
/// Advance shared memory read-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_read_stage()
{
++smem_read_stage_idx_;
if (smem_read_stage_idx_ == Base::kStages) {
// Wrap back around to the 'start' of the circular buffer in shared memory
this->warp_tile_iterator_A_.add_tile_offset({0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset({-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
smem_read_stage_idx_ = 0;
}
}
/// Advance global memory read-iterators and shared memory write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_write_stage(
IteratorA &iterator_A,
IteratorB &iterator_B)
{
// Advance global iterators
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
// Advance shared iterators
smem_iterator_A_.add_tile_offset({0, 1});
smem_iterator_B_.add_tile_offset({1, 0});
// Increment shared memory write stage index
++smem_write_stage_idx_;
if (smem_write_stage_idx_ == Base::kStages) {
// Wrap back around to the 'start' of the circular buffer in shared memory
smem_iterator_A_.add_tile_offset({0, -Base::kStages});
smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx_ = 0;
}
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, iterator_A.valid());
}
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B.get();
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
} else {
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, iterator_B.valid());
}
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
/// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching
/// the global fragments needed by the first kStages-1 threadblock mainloop iterations
CUTLASS_DEVICE
void prologue(
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) {
// Disable global fetching if done with global fetch iterations
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
int src_bytes = (iterator_A.valid() ? kSrcBytes : 0);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next write stage
advance_smem_write_stage(iterator_A, iterator_B);
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Optionally clear the remaining stages of SMEM. This is a functional requirement for
// some kernels so that all accumulator elements outside the GEMM footprint are zero.
if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) {
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_);
typename IteratorA::AccessType zero_A;
zero_A.clear();
last_smem_iterator_A.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
last_smem_iterator_A.get());
*dst_ptr = zero_A;
++last_smem_iterator_A;
}
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_);
typename IteratorB::AccessType zero_B;
zero_B.clear();
last_smem_iterator_B.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
last_smem_iterator_B.get());
*dst_ptr = zero_B;
++last_smem_iterator_B;
}
}
}
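// Illustrative prologue behavior (the stage count is hypothetical): with
// Base::kStages == 4, the loop above issues kStages - 1 == 3 complete
// global->shared stages and decrements gemm_k_iterations once per stage;
// the remaining stages are fetched from inside the mainloop so that copies
// overlap with math.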
/// Wait until we have at least one completed global fetch stage
CUTLASS_DEVICE
void gmem_wait()
{
// Wait until we have at least one committed global fetch stage. (#uncommitted = Base::kStages - 1 - #committed)
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
}
/// Perform a threadblock mainloop iteration of matrix multiply-accumulate
CUTLASS_DEVICE
void mac_loop_iter(
PipeState &pipe_state, ///< [in|out] loop-carried pipeline state
FragmentC &accum, ///< [in|out] destination accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
// Unroll the warp-level MMA tiles of a threadblock's mainloop iteration
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load the next warp-tile's A fragment from shared memory
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(pipe_state.warp_loaded_frag_A_[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
// Load the next warp-tile's B fragment from shared memory
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.load(pipe_state.warp_loaded_frag_B_[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_B_;
// Except for the first warp-tile, all warp-tiles convert their incoming shared memory fragments as necessary
if (warp_mma_k > 0) {
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
pipe_state.warp_loaded_frag_A_[warp_mma_k % 2],
pipe_state.warp_loaded_frag_B_[warp_mma_k % 2]);
}
// Execute the current warp-tile of MMA operations
if (Detail::kStagedAccumulation) {
warp_mma_(
pipe_state.tmp_accum_,
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
pipe_state.tmp_accum_
);
if (warp_mma_k == 0) {
plus<FragmentC> plus_accum;
accum = plus_accum(accum, pipe_state.tmp_accum_);
pipe_state.tmp_accum_.clear();
}
} else {
warp_mma_(
accum,
pipe_state.warp_transformed_frag_A_[warp_mma_k % 2],
pipe_state.warp_transformed_frag_B_[warp_mma_k % 2],
accum
);
}
// Except for the last warp-tile, all warp-tiles issue their share of
// global->shared fragment copies
if (warp_mma_k < Base::kWarpGemmIterations - 1) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(
iterator_A,
iterator_B,
group_start_iteration_A,
group_start_iteration_B);
}
// The second-to-last warp-tile also:
// - performs the last warp-tile's share of global->shared fragment copies
// - moves to the next global fetch stage
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Performs the last warp-tile's share of global->shared fragment copies
int group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
int group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(
iterator_A,
iterator_B,
group_start_iteration_A,
group_start_iteration_B);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Move to the next global fetch stage
advance_smem_write_stage(iterator_A, iterator_B);
advance_smem_read_stage();
// Disable global fetching when done with global fetch iterations
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
}
// The last warp-tile also converts the shared memory fragments used by
// the first warp-tile of the next iteration, if necessary (so we can
      // immediately start issuing MMA instructions at the top of the loop)
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[(warp_mma_k + 1) % 2],
pipe_state.warp_transformed_frag_B_[(warp_mma_k + 1) % 2],
pipe_state.warp_loaded_frag_A_[(warp_mma_k + 1) % 2],
pipe_state.warp_loaded_frag_B_[(warp_mma_k + 1) % 2]);
}
}
}
/// Perform the specified number of threadblock mainloop iterations of matrix
/// multiply-accumulate. Assumes prologue has been initiated.
CUTLASS_DEVICE
void gemm_iters(
int gemm_k_iterations, ///< number of threadblock mainloop iterations
FragmentC &accum, ///< [in|out] accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory
{
PipeState pipe_state;
// Disable global fetching if done with global fetch iterations
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
// Load first warp-tile's A fragment from shared memory
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(pipe_state.warp_loaded_frag_A_[0]);
++this->warp_tile_iterator_A_;
// Load first warp-tile's B fragment from shared memory
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_B_.load(pipe_state.warp_loaded_frag_B_[0]);
++this->warp_tile_iterator_B_;
// Transform, if necessary, the first warp-tile's shared memory fragments
warp_mma_.transform(
pipe_state.warp_transformed_frag_A_[0],
pipe_state.warp_transformed_frag_B_[0],
pipe_state.warp_loaded_frag_A_[0],
pipe_state.warp_loaded_frag_B_[0]);
if (Detail::kStagedAccumulation) {
pipe_state.tmp_accum_.clear();
}
// Mainloop
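    // Explanatory note on the loop bound: assuming the prologue has already consumed
    // Base::kStages - 1 of the k-tile iterations (as in the standard multistage mainloop),
    // the loop below keeps running while gemm_k_iterations > -(Base::kStages - 1). For
    // example, with 10 total k-tiles and kStages == 4, gemm_k_iterations enters the loop at 7
    // and its last iteration starts at -2, giving 10 compute iterations in total; the trailing
    // iterations consume stages already resident in shared memory with global fetching masked off.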
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
mac_loop_iter(
pipe_state,
accum,
iterator_A,
iterator_B,
gemm_k_iterations);
}
if (Detail::kStagedAccumulation) {
plus<FragmentC> plus_accum;
accum = plus_accum(accum, pipe_state.tmp_accum_);
}
    // Optionally commit and drain all pending and predicated cp.async (LDGSTS) operations from the GEMM mainloop
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
}
/// Prepares the class for another prologue.
CUTLASS_DEVICE
void wind_down()
{
    // Catch the smem-read iterator up to the smem-write iterator (so this class can be reused for another tile's prologue)
// First, increment remaining warp tiles to get to the next full stage. (Ideally we would
// just decrement one tile, but not all iterators implement --() decrement.)
#pragma unroll
for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k)
{
this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k);
this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
smem_read_stage_idx_++;
// Then wrap back two full stages (one for the tile advancing we just did, and one to catch the write iterators)
static const int kStageIters = Policy::kPartitionsK * Base::kWarpGemmIterations;
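    // Illustrative example (assumed values): with Policy::kPartitionsK == 1 and
    // Base::kWarpGemmIterations == 4, kStageIters == 4, so the offsets below either step the
    // warp tile iterators back by two full stages (8 warp k-tiles) or forward by
    // Base::kStages - 2 stages to wrap around the circular shared memory buffer.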
if (smem_read_stage_idx_ > 1)
{
this->warp_tile_iterator_A_.add_tile_offset({0, (-2 * kStageIters)});
this->warp_tile_iterator_B_.add_tile_offset({(-2 * kStageIters), 0});
}
else
{
this->warp_tile_iterator_A_.add_tile_offset({0, ((Base::kStages - 2) * kStageIters)});
this->warp_tile_iterator_B_.add_tile_offset({((Base::kStages - 2) * kStageIters), 0});
}
smem_read_stage_idx_ = smem_write_stage_idx_;
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< initial value of accumulator
FragmentC const &src_accum) {
// Prologue (start fetching iterations of global fragments into shared memory)
prologue(iterator_A, iterator_B, gemm_k_iterations);
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Initialize destination accumulators with source accumulators
accum = src_accum;
// Perform the MAC-iterations
gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 28,013 | C | 36.502008 | 123 | 0.640131 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/functional.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_planar_complex_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Transformation applied to A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplexMultistage :
public MmaPlanarComplexBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaPlanarComplexBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
  ///< Architecture tag
using ArchTag = arch::Sm80;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Transformation applied to A
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B
static ComplexTransform const kTransformB = TransformB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = ArrayPlanarComplex<
typename Policy::Operator::FragmentC::Element,
Policy::Operator::FragmentC::kElements
>;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of LDGSTS instructions to load one stage of operand A
static int const TBLDGSTSIterationsA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of LDGSTS instructions to load one stage of operand B
static int const TBLDGSTSIterationsB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of LDGSTS instructions to load on group of operand A
static int const kAccessesPerGroupA =
(TBLDGSTSIterationsA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of LDGSTS instructions to load on group of operand B
static int const kAccessesPerGroupB =
(TBLDGSTSIterationsB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
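    // Illustrative example (assumed values): with TBLDGSTSIterationsA == 8 and
    // Base::kWarpGemmIterations == 4, kAccessesPerGroupA == 2, i.e. the ceiling division
    // spreads a stage's LDGSTS traffic evenly across the warp-level MMA iterations.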
};
private:
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPlanarComplexMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
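    // Illustrative example (assumed values): with Base::WarpCount::kM == 2, kN == 2 and
    // kK == 1, warp_idx == 3 yields warp_idx_mn == 3, warp_idx_k == 0, warp_idx_m == 1 and
    // warp_idx_n == 1, i.e. the warp covering the last warp tile along both M and N.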
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
private:
CUTLASS_DEVICE
void copy_tiles_and_advance(
IteratorA &iterator_A_real,
IteratorA &iterator_A_imag,
IteratorB &iterator_B_real,
IteratorB &iterator_B_imag,
int group_start_A = 0,
int group_start_B = 0) {
iterator_A_real.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
iterator_A_imag.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// LDGSTS for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8;
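      // Illustrative example (assumed types): for half_t elements (16 bits) with 8 elements
      // per access and one access per vector, kSrcBytes == 16 * 8 / 1 / 8 == 16 bytes,
      // i.e. a full 128-bit cp.async transfer.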
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr_real = iterator_A_real.get();
auto gmem_ptr_imag = iterator_A_imag.get();
bool pred_guard = iterator_A_real.valid();
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v,
gmem_ptr_real,
pred_guard);
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
dst_ptr + v + (Base::SharedStorage::kImaginaryStrideA / IteratorA::ThreadMap::kElementsPerAccess),
reinterpret_cast<char const *>(gmem_ptr_imag),
pred_guard);
++iterator_A_real;
++iterator_A_imag;
}
++this->smem_iterator_A_;
}
iterator_B_real.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector);
iterator_B_imag.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// LDGSTS for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(this->smem_iterator_B_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr_real = iterator_B_real.get();
auto gmem_ptr_imag = iterator_B_imag.get();
bool pred_guard = iterator_B_real.valid();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v,
gmem_ptr_real,
pred_guard);
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
dst_ptr + v + (Base::SharedStorage::kImaginaryStrideB / IteratorB::ThreadMap::kElementsPerAccess),
reinterpret_cast<char const *>(gmem_ptr_imag),
pred_guard);
++iterator_B_real;
++iterator_B_imag;
}
++this->smem_iterator_B_;
}
}
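  // Explanatory note: the helper below assembles the planar-complex product
  //   (A_r + i*A_i) * (B_r + i*B_i) = (A_r*B_r - A_i*B_i) + i*(A_r*B_i + A_i*B_r)
  // from four real-valued warp-level MMAs. Conjugating an operand flips the sign of its
  // imaginary part, which is folded in via the pre-negated B fragments: the A_i*B_i term is
  // subtracted from the real part unless exactly one operand is conjugated (the XOR test),
  // and each cross term that involves the imaginary part of a conjugated operand is negated.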
CUTLASS_DEVICE
void warp_mma_planar_complex(
Operator & warp_mma,
FragmentC &accum,
WarpFragmentA const & real_A,
WarpFragmentA const & imag_A,
WarpFragmentB const & real_B,
WarpFragmentB const & imag_B) {
cutlass::negate<Array<typename WarpFragmentB::Element, WarpFragmentB::kElements>> neg_op_B;
WarpFragmentB neg_real_B = neg_op_B(real_B);
WarpFragmentB neg_imag_B = neg_op_B(imag_B);
warp_mma(accum.real, real_A, real_B, accum.real);
if (kTransformB == ComplexTransform::kNone) {
warp_mma(accum.imag, real_A, imag_B, accum.imag);
}
else {
warp_mma(accum.imag, real_A, neg_imag_B, accum.imag);
}
if (kTransformA == ComplexTransform::kNone) {
warp_mma(accum.imag, imag_A, real_B, accum.imag);
}
else {
warp_mma(accum.imag, imag_A, neg_real_B, accum.imag);
}
    if ((kTransformA == ComplexTransform::kNone) ^ (kTransformB == ComplexTransform::kNone)) {
warp_mma(accum.real, imag_A, imag_B, accum.real);
}
else {
warp_mma(accum.real, imag_A, neg_imag_B, accum.real);
}
}
public:
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A_real,
///< iterator over A operand in global memory
IteratorA iterator_A_imag,
///< iterator over B operand in global memory
IteratorB iterator_B_real,
///< iterator over B operand in global memory
IteratorB iterator_B_imag,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
iterator_A_real.set_iteration_index(0);
iterator_A_imag.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// LDGSTS for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLDGSTSIterationsA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8;
bool pred_guard = iterator_A_real.valid();
auto src_ptr_real = iterator_A_real.get();
auto src_ptr_imag = iterator_A_imag.get();
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, src_ptr_real, pred_guard);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v +
Base::SharedStorage::kImaginaryStrideA /
IteratorA::ThreadMap::kElementsPerAccess,
reinterpret_cast<char const *>(src_ptr_imag),
pred_guard);
++iterator_A_real;
++iterator_A_imag;
}
++this->smem_iterator_A_;
}
iterator_B_real.set_iteration_index(0);
iterator_B_imag.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// LDGSTS for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLDGSTSIterationsB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8;
bool pred_guard = iterator_B_real.valid();
auto src_ptr_real = iterator_B_real.get();
auto src_ptr_imag = iterator_B_imag.get();
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, src_ptr_real, pred_guard);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v +
Base::SharedStorage::kImaginaryStrideB /
IteratorB::ThreadMap::kElementsPerAccess,
reinterpret_cast<char const *>(src_ptr_imag),
pred_guard);
++iterator_B_real;
++iterator_B_imag;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A_real.add_tile_offset({0, 1});
iterator_A_imag.add_tile_offset({0, 1});
iterator_B_real.add_tile_offset({1, 0});
iterator_B_imag.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a memory fence between stages of cp.async instructions
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Blocks until all but kStages-2 cp.async stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpFragmentA warp_frag_real_A[2];
WarpFragmentA warp_frag_imag_A[2];
WarpFragmentB warp_frag_real_B[2];
WarpFragmentB warp_frag_imag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_real_A[0]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[0]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A_real, iterator_A_imag, iterator_B_real, iterator_B_imag);
Operator warp_mma;
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
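    // Explanatory note: shared memory holds Base::kStages stages arranged as a circular
    // buffer. Writes resume at stage kStages - 1 (the one stage the prologue did not fill)
    // while reads start at stage 0; whenever either index advances past the final stage, the
    // corresponding iterators are offset back by kStages tiles in the wrap-around logic
    // inside the mainloop below.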
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping to the k offset
        // if this is the last group.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
}
else {
group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
group_start_iteration_A,
group_start_iteration_B);
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a memory fence between stages of cp.async instructions
cutlass::arch::cp_async_fence();
// Blocks until all but kStages-2 cp.async stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A_real.add_tile_offset({0, 1});
iterator_A_imag.add_tile_offset({0, 1});
iterator_B_real.add_tile_offset({1, 0});
iterator_B_imag.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A_real.clear_mask(gemm_k_iterations == 0);
iterator_A_imag.clear_mask(gemm_k_iterations == 0);
iterator_B_real.clear_mask(gemm_k_iterations == 0);
iterator_B_imag.clear_mask(gemm_k_iterations == 0);
}
warp_mma_planar_complex(
warp_mma,
accum,
warp_frag_real_A[warp_mma_k % 2],
warp_frag_imag_A[warp_mma_k % 2],
warp_frag_real_B[warp_mma_k % 2],
warp_frag_imag_B[warp_mma_k % 2]);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 22,805 | C | 34.468118 | 141 | 0.625872 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/mma_blas3_multistage.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
    Used by BLAS3 kernels that need to treat diagonal elements of an input iterator as a special case.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kZfill,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kTriangular,
/// Used for partial specialization
typename Enable = bool>
class MmaBlas3Multistage :
public MmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
///< Blas Mode
static BlasMode const kBlasMode = BlasMode_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load on group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load on group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
};
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaBlas3Multistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
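  // Explanatory note on the diagonal special case used throughout the copies below:
  // when an element lies on the input's diagonal (iterator.getOnDiag()), kHermitian mode
  // with complex data copies only the real part from global memory and zero-fills the
  // imaginary part (forcing a real diagonal), while the remaining modes write one (1)
  // directly into shared memory instead of reading the diagonal from global memory,
  // consistent with a unit-diagonal convention. kSizeRealBytes is 8 for complex<double>
  // (a double real part) and 4 otherwise; it is computed this way so the expression still
  // compiles for non-complex element types.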
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A.get();
bool isvalid = iterator_A.valid();
if (isvalid && iterator_A.getOnDiag()) {
          // Elements that are on the diagonal
if (kBlasMode == BlasMode::kHermitian && cutlass::is_complex<typename IteratorA::Element>::value) {
/* Copy real part from gmem, write zero for imag part in smem */
            /* The following logic to determine kSizeRealBytes is so that the compiler doesn't complain
             * when compiling for a non-complex datatype while using half the size for cp_async_zfill */
int const kSizeRealBytes = (platform::is_same<typename IteratorA::Element,
complex<double>>::value) ? 8 : 4;
cutlass::arch::cp_async_zfill<kSizeRealBytes, cutlass::arch::CacheOperation::Always>(
dst_ptr + v, gmem_ptr, true);
cutlass::arch::cp_async_diag<typename IteratorA::Element, true>(
reinterpret_cast<char *> (dst_ptr + v) + kSizeRealBytes);
} else {
/* Write one (1) directly to smem*/
cutlass::arch::cp_async_diag<typename IteratorA::Element>(dst_ptr + v);
}
} else {
          // Elements that are not on the diagonal
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, isvalid);
}
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B.get();
bool isvalid = iterator_B.valid();
if (isvalid && iterator_B.getOnDiag()) {
          // Elements that are on the diagonal
if (kBlasMode == BlasMode::kHermitian && cutlass::is_complex<typename IteratorB::Element>::value) {
/* Copy real part from gmem, write zero for imag part in smem */
int const kSizeRealBytes = (platform::is_same<typename IteratorB::Element,
complex<double>>::value) ? 8 : 4;
cutlass::arch::cp_async_zfill<kSizeRealBytes, cutlass::arch::CacheOperation::Always>(
dst_ptr + v, gmem_ptr, true);
cutlass::arch::cp_async_diag<typename IteratorB::Element, true>(
reinterpret_cast<char *> (dst_ptr + v) + kSizeRealBytes);
} else {
/* Write one (1) directly to smem*/
cutlass::arch::cp_async_diag<typename IteratorB::Element>(dst_ptr + v);
}
} else {
          // Elements that are not on the diagonal
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, isvalid);
}
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
auto gmem_ptr = iterator_A.get();
bool isvalid = iterator_A.valid();
if (isvalid && iterator_A.getOnDiag()) {
          // Elements that are on the diagonal
if (kBlasMode == BlasMode::kHermitian && cutlass::is_complex<typename IteratorA::Element>::value) {
/* Copy real part from gmem, write zero for imag part in smem */
int const kSizeRealBytes = (platform::is_same<typename IteratorA::Element,
complex<double>>::value) ? 8 : 4;
cutlass::arch::cp_async_zfill<kSizeRealBytes, cutlass::arch::CacheOperation::Always>(
dst_ptr + v, gmem_ptr, true);
cutlass::arch::cp_async_diag<typename IteratorA::Element, true>(
reinterpret_cast<char *> (dst_ptr + v) + kSizeRealBytes);
} else {
/* Write one (1) directly to smem*/
cutlass::arch::cp_async_diag<typename IteratorA::Element>(dst_ptr + v);
}
} else {
          // Elements that are not on the diagonal
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, gmem_ptr, isvalid);
}
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
auto gmem_ptr = iterator_B.get();
bool isvalid = iterator_B.valid();
if (isvalid && iterator_B.getOnDiag()) {
          // Elements that are on the diagonal
if (kBlasMode == BlasMode::kHermitian && cutlass::is_complex<typename IteratorB::Element>::value) {
/* Copy real part from gmem, write zero for imag part in smem */
int const kSizeRealBytes = (platform::is_same<typename IteratorB::Element,
complex<double>>::value) ? 8 : 4;
cutlass::arch::cp_async_zfill<kSizeRealBytes, cutlass::arch::CacheOperation::Always>(
dst_ptr + v, gmem_ptr, true);
cutlass::arch::cp_async_diag<typename IteratorB::Element, true>(
reinterpret_cast<char *> (dst_ptr + v) + kSizeRealBytes);
} else {
/* Write one (1) directly to smem*/
cutlass::arch::cp_async_diag<typename IteratorB::Element>(dst_ptr + v);
}
} else {
          // Elements that are not on the diagonal
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, gmem_ptr, isvalid);
}
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
//
// Clear the remaining tiles of SMEM. This is a functional requirement for some kernels
// so that all accumulator elements outside the GEMM footprint are zero.
//
if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) {
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_);
typename IteratorA::AccessType zero_A;
zero_A.clear();
last_smem_iterator_A.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
last_smem_iterator_A.get());
*dst_ptr = zero_A;
++last_smem_iterator_A;
}
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_);
typename IteratorB::AccessType zero_B;
zero_B.clear();
last_smem_iterator_B.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
last_smem_iterator_B.get());
*dst_ptr = zero_B;
++last_smem_iterator_B;
}
}
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
// tf32x3 kernels use staging accumulation. warp_mma uses a temporary
// accumulator and this temporary accumulator is added to the final
// accumulator once in every mainloop iteration.
plus<FragmentC> plus_accum;
FragmentC tmp_accum;
if (platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
tmp_accum.clear();
}
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping to the k offset
        // if this is the last group.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0)
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
if (platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
warp_mma(
tmp_accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
tmp_accum
);
if (warp_mma_k == 0) {
accum = plus_accum(accum, tmp_accum);
tmp_accum.clear();
}
} else {
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
}
        // Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations - 1) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A,
group_start_iteration_B);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
int group_start_iteration_A, group_start_iteration_B;
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A,
group_start_iteration_B);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.add_tile_offset({0, 1});
iterator_B.add_tile_offset({1, 0});
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
iterator_A.clear_mask(gemm_k_iterations == 0);
iterator_B.clear_mask(gemm_k_iterations == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations)
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
}
}
if (platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddFastF32>::value
|| platform::is_same<typename Operator::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
accum = plus_accum(accum, tmp_accum);
}
if (SharedMemoryClear == SharedMemoryClearOption::kZfill) {
      // Commit and drain all pending and predicated cp.async (LDGSTS) operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 27,413 | C | 37.995733 | 111 | 0.602524 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting sparse
TensorOp instructions.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_sparse_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/gemm/threadblock/mma_sparse_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Template defining default matrix multiply operators inferred from threadblock tile size,
/// global memory data layout, and target math instruction.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false
/// Cache operation of operand A
, cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global
>
struct DefaultSparseMmaCore;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
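  // Explanatory note: kSparse == 2 reflects the 2:4 structured-sparsity format of the sparse
  // tensor op path. Operand A is stored in compressed form at half its logical K extent
  // (hence the Shape::kK / kSparse terms below), with the positions of the retained values
  // described by the separate metadata operand E.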
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, int(128 / sizeof(ElementA))>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, int(128 / sizeof(ElementB))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK / kSparse>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
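// Illustrative usage (tile shapes, element types, and stage count below are
// assumptions chosen for exposition, not requirements of this header): a
// 2:4 structured-sparse f16 GEMM core on SM80 could instantiate the
// specialization above roughly as
//
//   using SparseCore = cutlass::gemm::threadblock::DefaultSparseMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 64>,   // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 64>,     // warp tile
//       cutlass::gemm::GemmShape<16, 8, 32>,      // sparse tensor op shape (f16)
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp,
//       3,                                        // stages
//       cutlass::arch::OpMultiplyAdd>;
//
// Because kSparse == 2, only Shape::kK / kSparse columns of A are staged in
// shared memory, and the metadata operand is exposed through
// SparseCore::ElementE, SparseCore::GmemLayoutE, and SparseCore::SmemIteratorE.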
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / kSparse / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
  // crosswise cannot be larger than 1024 bits.
static int const kCrosswiseB =
(Shape::kK > (1024 / sizeof_bits<ElementB>::value))
? (1024 / sizeof_bits<ElementB>::value)
: Shape::kK;
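  // Worked example of the clamp above (operand types assumed for
  // illustration): with ElementB = int8_t (8 bits) and Shape::kK = 256, the
  // limit is 1024 / 8 = 128 < 256, so kCrosswiseB = 128; with ElementB =
  // half_t (16 bits) and Shape::kK = 64, the limit is 1024 / 16 = 64 and
  // kCrosswiseB simply equals Shape::kK.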
static int const kWarpThreadArrangementContiguousB =
kCrosswiseB / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK / kSparse>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kCrosswiseB>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK / kSparse, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
  // crosswise cannot be larger than 1024 bits.
static int const kCrosswiseB =
(Shape::kK > (1024 / sizeof_bits<ElementB>::value))
? (1024 / sizeof_bits<ElementB>::value)
: Shape::kK;
static int const kWarpThreadArrangementContiguousB =
kCrosswiseB / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, int(128 / sizeof(ElementA))>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kCrosswiseB>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK / kSparse>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultSparseMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kSparse = 2;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / kSparse / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK / kSparse>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, int(128 / sizeof(ElementB))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK / kSparse, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK / kSparse>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Cache operation of operand E
static cutlass::arch::CacheOperation::Kind const kCacheOpE =
cutlass::arch::CacheOperation::Global;
static int const kInterleavedE = MmaTensorOp::kInterleaved;
static int const kMetaSizeInBits = MmaTensorOp::kMetaSizeInBits;
static int const kMaxID2 = MmaTensorOp::kMaxID2;
static int const kElementsPerElementE = MmaTensorOp::kElementsPerElementE;
using ElementE = typename MmaTensorOp::ElementE;
using GmemLayoutE = cutlass::layout::ColumnMajorInterleaved<kInterleavedE>;
// Shared memory layout. Interleaved layout is mapped to PitchLinear layout.
using SmemLayoutE = typename MmaTensorOp::LayoutE;
/// ThreadMap of iterator E
static int const kElementsPerAccessE =
kAccessSizeInBits / sizeof_bits<ElementE>::value;
/// E is tiny. Not all warps are needed.
static int const kThreadsE =
(Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value) >
kThreads)
? kThreads
: (Shape::kM * Shape::kK / kSparse / kElementsPerElementE /
(kAccessSizeInBits / sizeof_bits<ElementE>::value));
using IteratorThreadMapE = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE /
kInterleavedE>,
kThreadsE, kElementsPerAccessE>;
/// Shared memory iterator to E operand
using SmemIteratorE = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM * kInterleavedE,
Shape::kK / kSparse / kElementsPerElementE / kInterleavedE>,
ElementE, SmemLayoutE, 0, IteratorThreadMapE>;
/// Policy used to define MmaPipelined
using MmaPolicy =
SparseMmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 32,106 | C | 37.451497 | 100 | 0.678253 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm75.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA =
layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, int(128 / sizeof(ElementA))>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, int(128 / sizeof(ElementB))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
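// Illustrative usage (the shapes and types below are assumptions chosen for
// exposition): a two-stage f16 Tensor Core configuration for column-major A
// and row-major B on SM75 could be obtained as
//
//   using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 32>,   // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,     // warp tile
//       cutlass::gemm::GemmShape<16, 8, 8>,       // tensor op instruction
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp,
//       2,                                        // stages
//       cutlass::arch::OpMultiplyAdd>;
//
// MmaCore::MmaPolicy is what a threadblock-scoped pipelined mainloop consumes,
// while MmaCore::SmemIteratorA / SmemIteratorB describe how staged tiles are
// written to shared memory.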
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, int(128 / sizeof(ElementB))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
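  // For example (assuming half_t operands and Shape::kK = 32): one 128-bit
  // access carries 128 / 16 = 8 elements, so 32 / 8 = 4 threads of the warp
  // lie along the contiguous (K) dimension and 32 / 4 = 8 threads along the
  // strided dimension.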
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, int(128 / sizeof(ElementA))>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Below is for arch::OpMultiplyAddFastF16
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float,
layout::ColumnMajor, float, layout::RowMajor, float,
LayoutC_, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = float;
using LayoutA = layout::ColumnMajor;
using ElementB = float;
using LayoutB = layout::RowMajor;
using ElementC = float;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 256;
/// Default Operator
using Operator = arch::OpMultiplyAdd;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>;
// Shared memory layout
using SmemLayoutB =
layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<half_t>::value,
int(128 / sizeof(half_t))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
half_t,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
half_t,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
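// Illustrative usage (the concrete shapes are assumptions): with
// arch::OpMultiplyAddFastF16, A and B are loaded from global memory as float
// using 256-bit accesses (8 elements per access per the thread maps above),
// but the shared-memory tiles and the warp-level tensor op operate on half_t.
//
//   using FastF16Core = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 32>,
//       cutlass::gemm::GemmShape<64, 64, 32>,
//       cutlass::gemm::GemmShape<16, 8, 8>,
//       float, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp,
//       2,
//       cutlass::arch::OpMultiplyAddFastF16>;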
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float,
layout::RowMajor, float, layout::ColumnMajor, float,
LayoutC_, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = float;
using LayoutA = layout::RowMajor;
using ElementB = float;
using LayoutB = layout::ColumnMajor;
using ElementC = float;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 256;
/// Default Operator
using Operator = arch::OpMultiplyAdd;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA =
layout::RowMajorTensorOpMultiplicandCrosswise<sizeof_bits<half_t>::value,
Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<half_t>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
half_t,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
half_t,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float,
layout::RowMajor, float, layout::RowMajor, float,
LayoutC_, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = float;
using LayoutA = layout::RowMajor;
using ElementB = float;
using LayoutB = layout::RowMajor;
using ElementC = float;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 256;
/// Default Operator
using Operator = arch::OpMultiplyAdd;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<half_t>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
half_t,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
half_t,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, float,
layout::ColumnMajor, float, layout::ColumnMajor, float,
LayoutC_, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = float;
using LayoutA = layout::ColumnMajor;
using ElementB = float;
using LayoutB = layout::ColumnMajor;
using ElementC = float;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 256;
/// Default Operator
using Operator = arch::OpMultiplyAdd;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<half_t>::value, int(128 / sizeof(half_t))>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<half_t>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>, half_t, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>, half_t, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, half_t, SmemLayoutA, half_t, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>,
WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major-interleave
/// B: row-major-interleave
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
///
/// Column/RowMajorInterleaved<InterleavedK>(m, n) is mapped to Column/RowMajor(m
/// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators
/// can be reused. The shared store iterator is the same as the crosswise shared
/// store iterator. So, the only thing we need to do is to swap the coordinates
/// (contiguous <=> strided) used by the global iterator and the shared store
/// iterator.
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Number of interleaved k
int InterleavedK>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajorInterleaved<InterleavedK>, ElementB_,
layout::RowMajorInterleaved<InterleavedK>, ElementC_,
LayoutC_, arch::OpClassTensorOp, 2, Operator_,
AccumulatorsInRowMajor> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementB = ElementB_;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
static int const kInterleavedK = InterleavedK;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess =
kAccessSizeInBits / sizeof_bits<ElementA>::value;
static int const kWarpThreadArrangementContiguous =
kInterleavedK / kElementsPerAccess;
static int const kWarpThreadArrangementStrided =
kWarpSize / kWarpThreadArrangementContiguous;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, kInterleavedK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kInterleavedK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedK,
Shape::kK / kInterleavedK>,
kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMap<
IteratorThreadMapA,
layout::PitchLinearShape<kWarpThreadArrangementContiguous,
kWarpThreadArrangementStrided>>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN * kInterleavedK,
Shape::kK / kInterleavedK>,
kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMap<
IteratorThreadMapB,
layout::PitchLinearShape<kWarpThreadArrangementContiguous,
kWarpThreadArrangementStrided>>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
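// Instantiation sketch (hypothetical parameter choices, shown for illustration
// only): an int8 interleaved GEMM core might be selected as
//
//   using Core = DefaultMmaCore<
//       GemmShape<64, 64, 64>, GemmShape<32, 32, 64>, GemmShape<8, 8, 16>,
//       int8_t, layout::ColumnMajorInterleaved<32>,
//       int8_t, layout::RowMajorInterleaved<32>,
//       int32_t, layout::ColumnMajorInterleaved<32>,
//       arch::OpClassTensorOp, 2, arch::OpMultiplyAddSaturate,
//       /*AccumulatorsInRowMajor=*/true>;
//
// after which Core::MmaPolicy parameterizes the threadblock-scoped mainloop.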
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 42,310 | C | 32.055469 | 100 | 0.67152 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a multistage GEMM kernel. Does not support batching or split-K.
*/
#pragma once
#include "cutlass/arch/arch.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/mma_planar_complex_multistage.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transformation on operand B
ComplexTransform TransformB = ComplexTransform::kNone,
/// Math operator tag (e.g. arch::OpMultiplyAdd)
typename Operator = arch::OpMultiplyAdd
>
struct DefaultMmaPlanarComplexMultistage {
// Construct a planar complex variant from the real-valued variant
using RealMmaMultistage = typename DefaultMma<
ElementA_,
LayoutA_,
kAlignmentA,
ElementB_,
LayoutB_,
kAlignmentB,
ElementAccumulator_,
LayoutC_,
OperatorClass_,
ArchTag_,
ThreadblockShape_,
WarpShape_,
InstructionShape_,
Stages,
Operator
>::ThreadblockMma;
using ThreadblockMma = MmaPlanarComplexMultistage<
ThreadblockShape_,
typename RealMmaMultistage::IteratorA,
typename RealMmaMultistage::SmemIteratorA,
cutlass::arch::CacheOperation::Global,
typename RealMmaMultistage::IteratorB,
typename RealMmaMultistage::SmemIteratorB,
cutlass::arch::CacheOperation::Global,
ElementAccumulator_,
LayoutC_,
typename RealMmaMultistage::Policy,
Stages,
TransformA,
TransformB
>;
};
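// Usage sketch (illustrative; the alias below is hypothetical): a planar-complex
// mainloop for half-precision operands on SM80 might be obtained as
//
//   using Mma = DefaultMmaPlanarComplexMultistage<
//       half_t, layout::ColumnMajor, 8,
//       half_t, layout::RowMajor, 8,
//       float, layout::RowMajor,
//       arch::OpClassTensorOp, arch::Sm80,
//       GemmShape<64, 64, 32>, GemmShape<32, 32, 32>, GemmShape<16, 8, 16>,
//       3>::ThreadblockMma;
//
// The real-valued iterators and policy are reused; the planar-complex mainloop
// applies them separately to the real and imaginary planes of each operand.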
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 5,110 | C | 36.306569 | 100 | 0.647162 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_core_wmma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/gemm/warp/mma_tensor_op_wmma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: wmma tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
///< Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_,
/// Number of stages
int Stages>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassWmmaTensorOp, Stages,
Operator_> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassWmmaTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassWmmaTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
  // NOTE: the shared memory layout for wmma is the same as the operands' layout in global memory
using SmemLayoutA = LayoutA;
using SmemLayoutB = LayoutB;
// Pad shared memory to avoid bank conflicts
static int const kPaddingA = 128 / sizeof_bits<ElementA>::value;
static int const kPaddingB = 128 / sizeof_bits<ElementB>::value;
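  // Worked example (illustrative): for half_t operands, 128 / 16 = 8 elements
  // (16 bytes) of padding are inserted so that successive rows/columns start
  // in different shared-memory banks.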
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
    kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Wmma<
InstructionShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
Operator
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<kPaddingA, 0>,
MatrixShape<0, kPaddingB>,
WarpCount::kK
>;
};
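// Instantiation sketch (hypothetical parameters, for illustration only): a
// WMMA-based core for half-precision operands might look like
//
//   using WmmaCore = DefaultMmaCore<
//       GemmShape<128, 128, 32>, GemmShape<64, 64, 32>, GemmShape<16, 16, 16>,
//       half_t, layout::ColumnMajor, half_t, layout::RowMajor,
//       float, layout::RowMajor, arch::OpClassWmmaTensorOp,
//       2, arch::OpMultiplyAdd>;
//
// Here the 16x16x16 instruction shape is one of the shapes accepted by
// nvcuda::wmma, and WmmaCore::MmaPolicy parameterizes the pipelined mainloop.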
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: wmma tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
///< Shape of threadblock-scoped matrix multiply operator
    ///< (concept: GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape) [allowed
/// wmma instruction shapes, e.g., 16x16x16, 32x8x16, 8x32x16,...]
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_,
/// Number of stages
int Stages>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassWmmaTensorOp, Stages,
Operator_> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassWmmaTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassWmmaTensorOp>::value;
/// Number of threads per threadblock
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
      Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
  // the shared memory layout for wmma is the same as the operands' layout in global memory
using SmemLayoutA = LayoutA;
using SmemLayoutB = LayoutB;
// Pad shared memory to avoid bank conflicts
static int const kPaddingA = 128 / sizeof_bits<ElementA>::value;
static int const kPaddingB = 128 / sizeof_bits<ElementB>::value;
//
// Iterators to write to shared memory
//
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
    IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Wmma<
InstructionShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
Operator
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, kPaddingA>,
MatrixShape<kPaddingB, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: wmma tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_,
/// Number of stages
int Stages>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassWmmaTensorOp, Stages, Operator_> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassWmmaTensorOp;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassWmmaTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
//
// Shared memory layouts
//
  // the shared memory layout for wmma is the same as the operands' layout in global memory
using SmemLayoutA = LayoutA;
using SmemLayoutB = LayoutB;
// Pad shared memory to avoid bank conflicts
static int const kPaddingA = 128 / sizeof_bits<ElementA>::value;
static int const kPaddingB = 128 / sizeof_bits<ElementB>::value;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Wmma<
InstructionShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
Operator
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, kPaddingA>,
MatrixShape<0, kPaddingB>,
WarpCount::kK
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: wmma tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by MMA
typename Operator_,
/// Number of stages
int Stages>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassWmmaTensorOp, Stages,
Operator_> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassWmmaTensorOp;
/// Number of warps present
using WarpCount =
GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassWmmaTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousB =
      Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
  // the shared memory layout for wmma is the same as the operands' layout in global memory
using SmemLayoutA = LayoutA;
using SmemLayoutB = LayoutB;
// Pad shared memory to avoid bank conflicts
static int const kPaddingA = 128 / sizeof_bits<ElementA>::value;
static int const kPaddingB = 128 / sizeof_bits<ElementB>::value;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Wmma<
InstructionShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
Operator
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<kPaddingA, 0>,
MatrixShape<kPaddingB, 0>,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
| 20,975 | C | 28.419355 | 100 | 0.665316 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements several possible threadblock-swizzling functions mapping blockIdx to
GEMM problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/gemm/threadblock/index_remat.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle_streamk.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
template <int N = 1>
struct GemmIdentityThreadblockSwizzle {
CUTLASS_HOST_DEVICE
GemmIdentityThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// *Gemm* problem size: gemm(M, N, K)
CUTLASS_HOST_DEVICE
GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int split_k_slices) const {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// *ImplicitGemm* Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
GemmCoord tile_size,
int split_k_slices) const {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return get_tiled_shape(
implicit_gemm_problem_size, tile_size, split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// *ImplicitGemm* Conv3d problem size: conv_operator(NZPQK, NDHWC, KTRSC)
CUTLASS_HOST_DEVICE
GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv3dProblemSize const &problem_size,
GemmCoord tile_size,
int split_k_slices) const {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return get_tiled_shape(
implicit_gemm_problem_size, tile_size, split_k_slices);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
dim3 get_grid_shape(GemmCoord tiled_shape) const {
int tile = 1 << get_log_tile(tiled_shape);
return dim3(tiled_shape.m() * tile, (tiled_shape.n() + tile - 1) / tile, tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
int get_log_tile(GemmCoord tiled_shape) const {
auto n = tiled_shape.n();
// Thresholds picked so that it doesn't cause too many no-op CTAs
if (N >= 8 && n >= 6)
return 3;
else if (N >= 4 && n >= 3)
return 2;
else if (N >= 2 && n >= 2)
return 1;
else
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(int log_tile) const {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(GemmCoord tiled_shape) const {
int const kTile = N;
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
if ((tiled_shape.m() < kTile) || (tiled_shape.n() < kTile))
return GemmCoord{block_idx_x, block_idx_y, RematerializeBlockIdxZ()};
return GemmCoord{
(block_idx_x / kTile),
(block_idx_y * kTile) + (block_idx_x % kTile),
RematerializeBlockIdxZ()
};
}
};
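// Worked example (illustrative, not part of the original header): with N = 8
// and a 1024x512x256 problem tiled by 128x128x32 threadblocks,
//
//   get_tiled_shape({1024, 512, 256}, {128, 128, 32}, /*split_k_slices=*/1)
//       returns GemmCoord(8, 4, 1) by ceiling-dividing M and N by the tile;
//   get_log_tile({8, 4, 1}) returns 2 (since N >= 4 and n >= 3);
//   get_grid_shape({8, 4, 1}) returns dim3(8 * 4, (4 + 3) / 4, 1) = (32, 1, 1).
//
// On the device, get_tile_offset(2) then recovers the (m, n) tile coordinate
// from blockIdx.x / blockIdx.y by shifting and masking with the swizzle width.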
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
struct GemmHorizontalThreadblockSwizzle {
CUTLASS_HOST_DEVICE
GemmHorizontalThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int split_k_slices) const {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
dim3 get_grid_shape(GemmCoord tiled_shape) const {
return dim3(tiled_shape.n(), tiled_shape.m(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
int get_log_tile(GemmCoord tiled_shape) const {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(GemmCoord tiled_shape) const {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for batched GEMMs
struct GemmBatchedIdentityThreadblockSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int batch_count) const {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
batch_count % (1 << 16));
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
dim3 get_grid_shape(GemmCoord tiled_shape) const {
return dim3(tiled_shape.m(), tiled_shape.n(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
int get_log_tile(GemmCoord tiled_shape) const {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(GemmCoord tiled_shape) const {
return GemmCoord{
RematerializeBlockIdxX(),
RematerializeBlockIdxY(),
RematerializeBlockIdxZ()
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(int log_tile) const {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Gets the batch index
CUTLASS_DEVICE
int get_batch_idx() const {
return RematerializeBlockIdxZ();
}
};
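// Note (illustrative sketch, describing an assumption about the calling kernel
// rather than this struct): the batch count is folded into gridDim.z modulo
// 1 << 16, so a batched kernel is expected to stride over any remaining
// batches, e.g.
//
//   for (int b = swizzle.get_batch_idx(); b < batch_count; b += gridDim.z) {
//     /* compute one batch */
//   }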
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for split-K GEMMs
template <int N = 1>
struct GemmSplitKIdentityThreadblockSwizzle {
int const kTile = N;
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int partitions) const {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
partitions);
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
int get_log_tile(GemmCoord tiled_shape) const {
auto n = tiled_shape.n();
// Thresholds picked so that it doesn't cause too many no-op CTAs
if (N >= 8 && n >= 6)
return 3;
else if (N >= 4 && n >= 3)
return 2;
else if (N >= 2 && n >= 2)
return 1;
else
return 0;
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
dim3 get_grid_shape(GemmCoord tiled_shape) const {
int tile = 1 << get_log_tile(tiled_shape);
return dim3(tiled_shape.m() * tile, (tiled_shape.n() + tile - 1) / tile, tiled_shape.k());
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(int log_tile) const {
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
int block_idx_z = RematerializeBlockIdxZ();
return GemmCoord{(block_idx_x >> log_tile), //
(block_idx_y << log_tile) + ((block_idx_x) & ((1 << (log_tile)) - 1)),
block_idx_z};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(GemmCoord tiled_shape) const {
int const kTile = N;
int block_idx_x = RematerializeBlockIdxX();
int block_idx_y = RematerializeBlockIdxY();
if ((tiled_shape.m() < kTile) || (tiled_shape.n() < kTile))
return GemmCoord{block_idx_x, block_idx_y, RematerializeBlockIdxZ()};
return GemmCoord{
(block_idx_x / kTile),
(block_idx_y * kTile) + (block_idx_x % kTile),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for split-K GEMMs
struct GemmSplitKHorizontalThreadblockSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
GemmCoord get_tiled_shape(
GemmCoord problem_size,
GemmCoord tile_size,
int partitions) const {
return GemmCoord(
(problem_size.m() + tile_size.m() - 1) / tile_size.m(),
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
partitions);
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
dim3 get_grid_shape(GemmCoord tiled_shape) const {
return dim3(tiled_shape.n(), tiled_shape.m(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
int get_log_tile(GemmCoord tiled_shape) const {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(int log_tile) const {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(GemmCoord tiled_shape) const {
return GemmCoord{
RematerializeBlockIdxY(),
RematerializeBlockIdxX(),
RematerializeBlockIdxZ()
};
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for batched GEMVs
struct GemvBatchedStridedThreadblockDefaultSwizzle {
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
BatchedGemmCoord get_tiled_shape(
BatchedGemmCoord problem_size,
BatchedGemmCoord tile_size) const {
return BatchedGemmCoord(
1, // M is always 1
(problem_size.n() + tile_size.n() - 1) / tile_size.n(),
(problem_size.k() + tile_size.k() - 1) / tile_size.k(),
(problem_size.batch() + tile_size.batch() - 1) / tile_size.batch());
}
/// Computes CUDA grid dimensions given a size in units of logical tiles
CUTLASS_HOST_DEVICE
dim3 get_grid_shape(BatchedGemmCoord tiled_shape) const {
return dim3(tiled_shape.n(), tiled_shape.batch(), tiled_shape.k());
}
/// Calculates optimal swizzle width
CUTLASS_HOST_DEVICE
int get_log_tile(GemmCoord tiled_shape) const {
return 0;
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
BatchedGemmCoord get_tile_offset(int log_tile) const {
return BatchedGemmCoord{
0, // M is always 1
RematerializeBlockIdxX(),
RematerializeBlockIdxZ(),
RematerializeBlockIdxY(),
};
}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
BatchedGemmCoord get_tile_offset() const {
return BatchedGemmCoord{
0, // M is always 1
RematerializeBlockIdxX(),
RematerializeBlockIdxZ(),
RematerializeBlockIdxY(),
};
}
/// Gets the batch tile index
CUTLASS_DEVICE
int get_batch_tile_idx() const {
return RematerializeBlockIdxY();
}
/// Gets the absolute batch index
CUTLASS_DEVICE
int get_batch_idx() const {
return RematerializeBlockDimY()*RematerializeBlockIdxY() + RematerializeThreadIdxY();
}
};
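// Worked example (illustrative): with blockDim.y = 4 (four batches per
// threadblock) and blockIdx.y = 3, get_batch_tile_idx() returns 3 and
// get_batch_idx() returns 4 * 3 + threadIdx.y = 12..15, so threads at
// different threadIdx.y handle consecutive batches.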
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 15,007 | C | 31.626087 | 100 | 0.635304 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/mma_pipelined.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_ = NumericArrayConverter<
typename SmemIteratorA_::Element,
typename IteratorA_::Element,
IteratorA_::Fragment::kElements>,
///
/// Transformation applied to B operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class MmaPipelined : public MmaBase<Shape_, Policy_, 2> {
public:
///< Base class
using Base = MmaBase<Shape_, Policy_, 2>;
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // statically assert kStages for MmaPipelined is two (double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
protected:
//
// Data members
//
/// Warp-level MMA operator
Operator warp_mma;
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
///< transformation applied to A fragment
TransformA transform_A_;
///< transformation applied to B fragment
TransformB transform_B_;
/// Shared memory write stage index
int smem_write_stage_idx;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPipelined(
typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx, ///< ID of each thread within a warp
TransformA transform_A = TransformA(), ///< transformation applied to A fragment
TransformB transform_B = TransformB() ///< transformation applied to B fragment
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx),
transform_A_(transform_A),
transform_B_(transform_B),
smem_write_stage_idx(0)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
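    //
    // Worked example (illustrative): with WarpCount = <2, 2, 1> (i.e. 4 warps),
    // warp_idx = 3 yields warp_idx_mn = 3, warp_idx_k = 0, warp_idx_m = 1,
    // warp_idx_n = 1.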
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
/// Advance shared memory write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_write_stage()
{
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
smem_write_stage_idx ^= 1;
}
/// Advance shared memory read- and write-iterators to the next stage
CUTLASS_DEVICE
void advance_smem_stages()
{
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
// wrap write stage
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else
{
// wrap read stage
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
}
smem_write_stage_idx ^= 1;
}
/// GEMM prologue. Bootstrap the global->shared memory pipeline by fetching
/// the global fragments needed by the first kStages-1 threadblock mainloop iterations
CUTLASS_DEVICE
void prologue(
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B, ///< [in|out] iterator over B operand in global memory
int &gemm_k_iterations) ///< [in|out] number of threadblock mainloop iterations remaining
{
    // The last (residual) k-block is loaded in the prologue
// Load A fragment from global A
FragmentA tb_frag_A;
tb_frag_A.clear();
iterator_A.load(tb_frag_A);
++iterator_A;
// Load B fragment from global B
FragmentB tb_frag_B;
tb_frag_B.clear();
iterator_B.load(tb_frag_B);
++iterator_B;
// Store A and B fragments to shared
this->smem_iterator_A_.store(transform_A_(tb_frag_A));
this->smem_iterator_B_.store(transform_B_(tb_frag_B));
// Advance write stage
advance_smem_write_stage();
}
/// Wait until we have at least one completed global fetch stage
CUTLASS_DEVICE
void gmem_wait()
{
__syncthreads();
}
/// Perform the specified number of threadblock mainloop iterations of matrix
/// multiply-accumulate. Assumes prologue has been initiated.
CUTLASS_DEVICE
void gemm_iters(
int gemm_k_iterations, ///< number of threadblock mainloop iterations
FragmentC &accum, ///< [in|out] accumulator tile
IteratorA &iterator_A, ///< [in|out] iterator over A operand in global memory
IteratorB &iterator_B) ///< [in|out] iterator over B operand in global memory
{
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentB warp_frag_B[2];
// Load A fragment from shared A
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
++this->warp_tile_iterator_A_;
// Load B fragment from shared B
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_B_;
// Pair of fragments used to overlap global memory loads and math instructions;
FragmentA tb_frag_A;
FragmentB tb_frag_B;
// Avoid reading out of bounds
iterator_A.clear_mask(gemm_k_iterations <= 1);
iterator_B.clear_mask(gemm_k_iterations <= 1);
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A_(tb_frag_A));
this->smem_iterator_B_.store(transform_B_(tb_frag_B));
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Advance smem read and write stages
advance_smem_stages();
}
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
// Load fragment from global A
tb_frag_A.clear();
iterator_A.load(tb_frag_A);
++iterator_A;
// Load fragment from global B
tb_frag_B.clear();
iterator_B.load(tb_frag_B);
++iterator_B;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A.clear_mask(gemm_k_iterations <= 2);
iterator_B.clear_mask(gemm_k_iterations <= 2);
}
warp_mma(
accum,
warp_frag_A[warp_mma_k % 2],
warp_frag_B[warp_mma_k % 2],
accum);
}
}
}
/// Prepares the class for another prologue.
CUTLASS_DEVICE
void wind_down()
{
// First, increment remaining warp tiles to catch it up with the write stage.
    CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 1; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k)
{
this->warp_tile_iterator_A_.set_kgroup_index(warp_mma_k);
this->warp_tile_iterator_B_.set_kgroup_index(warp_mma_k);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
// If we bumped the read iterators to the end of the circular buffer, wrap them around to
// align them with the write iterators
if (smem_write_stage_idx == 0)
{
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum) ///< source accumulator tile
{
// Prologue
prologue(iterator_A, iterator_B, gemm_k_iterations);
// Wait until we have at least one completed global fetch stage
gmem_wait();
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Perform the MAC-iterations
gemm_iters(gemm_k_iterations, accum, iterator_A, iterator_B);
}
};
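// Usage sketch (illustrative; the type and variable names are hypothetical): a
// threadblock-scoped GEMM kernel typically constructs the pipelined mainloop
// from its shared storage and invokes it once per output tile:
//
//   Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
//   typename Mma::FragmentC accum;
//   accum.clear();
//   mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
//
// With kStages == 2, global loads for iteration i + 1 overlap the warp-level
// math of iteration i through the double-buffered shared-memory tiles.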
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,995 | C | 35.354545 | 126 | 0.641951 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/gemv.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a threadblock-scoped GEMV kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix-vector product using SIMT math instructions.
template <
class Core_ //< GemvCore
>
class Gemv {
public:
using Shape = typename Core_::Shape;
/// The MMA operator that computes GEMV
using Operator = typename Core_::Operator;
/// Iterates over A in global memory
using IteratorA = typename Core_::IteratorA;
/// Iterates over B in global memory
using IteratorB = typename Core_::IteratorB;
/// Fragment of operand C loaded from global memory
using IteratorC = typename Core_::IteratorC;
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of operand accumulator loaded/stored to global memory
using FragmentC = typename Operator::FragmentC;
/// Shape of the per-thread GEMV operation
using ThreadShape = typename Core_::ThreadShape;
public:
CUTLASS_DEVICE
Gemv() { }
CUTLASS_DEVICE
void operator()(
GemmCoord const &problem_size, ///< problem size of batched GEMV
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
    FragmentC const &src_accum) { ///< source accumulator tile
//
// Prologue
//
FragmentA frag_A;
FragmentB frag_B;
frag_A.clear();
frag_B.clear();
iterator_A.load(frag_A);
iterator_B.load(frag_B);
++iterator_A;
++iterator_B;
//
// Mainloop
//
Operator thread_mma;
int gemm_k = problem_size.k();
if (gemm_k < Shape::kK)
{
iterator_A.clear_mask();
iterator_B.clear_mask();
}
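    // Example (illustrative values): with Shape::kK == 8 and
    // problem_size.k() == 20, the mainloop below runs for gemm_k = 20, 12, 4,
    // i.e. three multiply-accumulate steps over k-chunks of at most Shape::kK
    // elements. clear_mask() predicates off further global loads once fewer
    // than Shape::kK elements remain.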
// iterate over K to accumulate result
CUTLASS_GEMM_LOOP
for (; gemm_k > 0; gemm_k -= Shape::kK) {
thread_mma(accum, frag_A, frag_B, accum);
iterator_A.load(frag_A);
iterator_B.load(frag_B);
++iterator_A;
++iterator_B;
if (gemm_k < Shape::kK)
{
iterator_A.clear_mask();
iterator_B.clear_mask();
}
}
}
};
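// Usage sketch (illustrative only; the kernel-side names below are not defined
// in this header): a kernel constructs one Gemv object per threadblock, builds
// IteratorA / IteratorB over the assigned rows of A and the vector B, and calls
//
//   Gemv gemv_op;
//   gemv_op(problem_size, accum, iterator_A, iterator_B, src_accum);
//
// where the Core_ policy supplies the iterator and operator types consumed above.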
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 4,726 | C | 30.939189 | 100 | 0.622302 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_planar_complex_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
    /// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Transformation applied to A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Transformation applied to B
ComplexTransform TransformB = ComplexTransform::kNone
>
class MmaPlanarComplexPipelined :
public MmaPlanarComplexBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = MmaPlanarComplexBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Policy describing tuning details
using Policy = Policy_;
using ArchTag = typename Policy::Operator::ArchTag;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
/// Transformation applied to A
static ComplexTransform const kTransformA = TransformA;
/// Transformation applied to B
static ComplexTransform const kTransformB = TransformB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC = ArrayPlanarComplex<
typename Policy::Operator::FragmentC::Element,
Policy::Operator::FragmentC::kElements
>;
/// Warp-level Mma
using Operator = typename Policy::Operator;
private:
using FragmentA = typename IteratorA::Fragment;
using FragmentB = typename IteratorB::Fragment;
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaPlanarComplexPipelined(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
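    // Worked example (illustrative): with WarpCount = <2, 2, 2> and
    // warp_idx = 5, warp_idx_mn = 5 % 4 = 1 and warp_idx_k = 5 / 4 = 1,
    // giving warp_idx_m = 1 and warp_idx_n = 0, i.e. this warp owns the
    // (m = 1, n = 0) warp tile of the second k partition.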
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
private:
CUTLASS_DEVICE
void warp_mma_planar_complex(
Operator & warp_mma,
FragmentC &accum,
WarpFragmentA const & real_A,
WarpFragmentA const & imag_A,
WarpFragmentB const & real_B,
WarpFragmentB const & imag_B) {
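    // Planar-complex multiply-accumulate written out in real arithmetic:
    //
    //   (A_r + i*A_i) * (B_r + i*B_i) = (A_r*B_r - A_i*B_i) + i*(A_r*B_i + A_i*B_r)
    //
    // Conjugating an operand negates its imaginary part. The four real-valued
    // warp MMAs below realize this product, substituting the pre-negated B
    // fragments wherever a sign flip is needed:
    //   - accum.real always receives A_r*B_r;
    //   - accum.imag receives A_r*B_i (negated if B is conjugated) and
    //     A_i*B_r (negated if A is conjugated);
    //   - accum.real receives -A_i*B_i when neither or both operands are
    //     conjugated and +A_i*B_i when exactly one is, which is what the
    //     XOR test below selects.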
cutlass::negate<Array<typename WarpFragmentB::Element, WarpFragmentB::kElements>> neg_op_B;
WarpFragmentB neg_real_B = neg_op_B(real_B);
WarpFragmentB neg_imag_B = neg_op_B(imag_B);
warp_mma(accum.real, real_A, real_B, accum.real);
if (kTransformB == ComplexTransform::kNone) {
warp_mma(accum.imag, real_A, imag_B, accum.imag);
}
else {
warp_mma(accum.imag, real_A, neg_imag_B, accum.imag);
}
if (kTransformA == ComplexTransform::kNone) {
warp_mma(accum.imag, imag_A, real_B, accum.imag);
}
else {
warp_mma(accum.imag, imag_A, neg_real_B, accum.imag);
}
if (kTransformA == ComplexTransform::kNone ^ kTransformB == ComplexTransform::kNone) {
warp_mma(accum.real, imag_A, imag_B, accum.real);
}
else {
warp_mma(accum.real, imag_A, neg_imag_B, accum.real);
}
}
public:
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A_real,
///< iterator over A operand in global memory
IteratorA iterator_A_imag,
///< iterator over B operand in global memory
IteratorB iterator_B_real,
///< iterator over B operand in global memory
IteratorB iterator_B_imag,
///< initial value of accumulator
FragmentC const &src_accum) {
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentA tb_frag_A_real;
FragmentA tb_frag_A_imag;
FragmentB tb_frag_B_real;
FragmentB tb_frag_B_imag;
tb_frag_A_real.clear();
tb_frag_A_imag.clear();
tb_frag_B_real.clear();
tb_frag_B_imag.clear();
    // The last k-block is loaded in the prologue
iterator_A_real.load(tb_frag_A_real);
iterator_A_imag.load(tb_frag_A_imag);
iterator_B_real.load(tb_frag_B_real);
iterator_B_imag.load(tb_frag_B_imag);
++iterator_A_real;
++iterator_A_imag;
++iterator_B_real;
++iterator_B_imag;
this->smem_iterator_A_.store(tb_frag_A_real);
this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA);
this->smem_iterator_B_.store(tb_frag_B_real);
this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB);
++this->smem_iterator_A_;
++this->smem_iterator_B_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_real_A[2];
WarpFragmentA warp_frag_imag_A[2];
WarpFragmentB warp_frag_real_B[2];
WarpFragmentB warp_frag_imag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_real_A[0]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[0], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[0]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[0], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
Operator warp_mma;
int smem_write_stage_idx = 1;
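    // Shared memory forms a two-stage circular buffer per operand: while warps
    // consume the tiles written in one stage, the threadblock fragments for the
    // next k-tile are stored into the other stage, and the two roles swap each
    // outer iteration (tracked by smem_write_stage_idx).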
// Avoid reading out of bounds
iterator_A_real.clear_mask(gemm_k_iterations <= 1);
iterator_A_imag.clear_mask(gemm_k_iterations <= 1);
iterator_B_real.clear_mask(gemm_k_iterations <= 1);
iterator_B_imag.clear_mask(gemm_k_iterations <= 1);
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
    // shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping back to k-group 0
        // when this is the last group of the stage.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(tb_frag_A_real);
this->smem_iterator_A_.store_with_pointer_offset(tb_frag_A_imag, Base::SharedStorage::kImaginaryStrideA);
this->smem_iterator_B_.store(tb_frag_B_real);
this->smem_iterator_B_.store_with_pointer_offset(tb_frag_B_imag, Base::SharedStorage::kImaginaryStrideB);
__syncthreads();
++this->smem_iterator_B_;
++this->smem_iterator_A_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_real_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_.load_with_pointer_offset(warp_frag_imag_A[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideA);
this->warp_tile_iterator_B_.load(warp_frag_real_B[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load_with_pointer_offset(warp_frag_imag_B[(warp_mma_k + 1) % 2], Base::SharedStorage::kImaginaryStrideB);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
iterator_A_real.load(tb_frag_A_real);
iterator_A_imag.load(tb_frag_A_imag);
iterator_B_real.load(tb_frag_B_real);
iterator_B_imag.load(tb_frag_B_imag);
++iterator_A_real;
++iterator_A_imag;
++iterator_B_real;
++iterator_B_imag;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A_real.clear_mask(gemm_k_iterations <= 2);
iterator_A_imag.clear_mask(gemm_k_iterations <= 2);
iterator_B_real.clear_mask(gemm_k_iterations <= 2);
iterator_B_imag.clear_mask(gemm_k_iterations <= 2);
}
warp_mma_planar_complex(
warp_mma,
accum,
warp_frag_real_A[warp_mma_k % 2],
warp_frag_imag_A[warp_mma_k % 2],
warp_frag_real_B[warp_mma_k % 2],
warp_frag_imag_B[warp_mma_k % 2]);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 14,746 | C | 33.698823 | 141 | 0.639699 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines basic properties needed by CTA-level GEMMs, given assumptions about
      the data layout of global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp
instructions.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_complex_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/gemm/threadblock/mma_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex double-precision
///
/// A: column-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, layout::ColumnMajor,
complex<double>, layout::RowMajor,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<double>;
using LayoutB = layout::RowMajor;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
  /// Size of a threadblock-scoped access (in bits)
static int const kAccessSizeInBits = 128;
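  // complex<double> elements are 128 bits wide, so each 128-bit access moves
  // exactly one element (kAccessSizeInBits / sizeof_bits<ElementA>::value == 1),
  // which is the vector width used by the thread maps below.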
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous128b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
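// Illustrative (hedged) instantiation of the specialization above; the tile,
// stage, and instruction-shape values are examples only and are not taken from
// this file:
//
//   using Core = DefaultMultistageMmaComplexCore<
//       GemmShape<64, 64, 16>,     // threadblock tile
//       GemmShape<32, 32, 16>,     // warp tile
//       GemmShape<8, 8, 4>,        // instruction shape
//       complex<double>, layout::ColumnMajor,
//       complex<double>, layout::RowMajor,
//       complex<double>, layout::RowMajor,
//       arch::OpClassTensorOp,
//       3,                         // stages
//       ComplexTransform::kNone, ComplexTransform::kNone,
//       arch::OpMultiplyAddComplex,
//       arch::CacheOperation::Always, arch::CacheOperation::Always>;
//
// The resulting Core::MmaPolicy and Core::SmemIterator{A,B} are then consumed
// by the threadblock-scoped multistage MMA.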
/// Partial specialization for complex double-precision
///
/// A: column-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, layout::ColumnMajor,
complex<double>, layout::ColumnMajor,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<double>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
using Operator = Operator_;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
  /// Size of a threadblock-scoped access (in bits)
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex double-precision
///
/// A: row-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, layout::RowMajor,
complex<double>, layout::ColumnMajor,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = layout::RowMajor;
using ElementB = complex<double>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
  /// Size of a threadblock-scoped access (in bits)
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
/// Partial specialization for complex double-precision
///
/// A: row-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, layout::RowMajor,
complex<double>, layout::RowMajor,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = layout::RowMajor;
using ElementB = complex<double>;
using LayoutB = layout::RowMajor;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
  /// Size of a threadblock-scoped access (in bits)
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous128b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex floating-point
///
/// A: column-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex
/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, layout::ColumnMajor,
complex<float>, layout::ColumnMajor,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<float>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
  /// Size of a threadblock-scoped access (in bits)
static int const kAccessSizeInBits = 64;
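  // complex<float> elements are 64 bits wide, so each 64-bit access moves
  // exactly one element (kAccessSizeInBits / sizeof_bits<ElementA>::value == 1).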
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
/// Partial specialization for complex floating-point
///
/// A: column-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex
/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, layout::ColumnMajor,
complex<float>, layout::RowMajor,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<float>;
using LayoutB = layout::RowMajor;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
  /// Size of a threadblock-scoped access (in bits)
static int const kAccessSizeInBits = 64;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex floating-point
///
/// A: row-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex
/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, layout::RowMajor,
complex<float>, layout::ColumnMajor,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = layout::RowMajor;
using ElementB = complex<float>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
  /// Size of a threadblock-scoped access (in bits)
static int const kAccessSizeInBits = 64;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex floating-point
///
/// A: row-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex
/// Math Instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, layout::RowMajor,
complex<float>, layout::RowMajor,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = layout::RowMajor;
using ElementB = complex<float>;
using LayoutB = layout::RowMajor;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
  /// Size of a threadblock-scoped access (in bits)
static int const kAccessSizeInBits = 64;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
WarpShape, InstructionShape,
ElementA, SmemLayoutA,
ElementB, SmemLayoutB,
ElementC, LayoutC,
kTransformA, kTransformB,
Operator>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex SIMT operation
///
/// A: column-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
typename RealA,
typename RealB,
typename RealC,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<1, 1, 1>,
complex<RealA>, layout::ColumnMajor,
complex<RealB>, layout::ColumnMajor,
complex<RealC>, LayoutC_,
arch::OpClassSimt,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = complex<RealA>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<RealB>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<RealC>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of access
static int const kAccessSizeInBits = sizeof_bits<ElementA>::value;
/// No vectorized accesses
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4; // TODO need to extract these from template data
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
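  // Worked example (illustrative): for complex<float> operands (64 bits each)
  // and a 32x64 warp tile, ThreadTileM = 32 / 4 = 8, ThreadTileN = 64 / 8 = 8,
  // LaneLayout = 2, numElementsA = numElementsB = 128 / 64 = 2, hence
  // LaneM = LaneN = min(2, 8) = 2 and LaneMmaShape = GemmShape<2, 2, 1>.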
  // LaneM and LaneN are additionally capped by the thread tile extents computed above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
    WarpShape,     /// Size of the warp-level GEMM problem (concept: gemm::GemmShape<>)
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
1, /// 1 partition along K dimension
kTransformA, /// Transform for A
kTransformB /// Transform for B
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, Shape::kK / 32>,
WarpCount::kK>;
};
/// Partial specialization for complex SIMT operation
///
/// A: column-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
typename RealA,
typename RealB,
typename RealC,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<1, 1, 1>,
complex<RealA>, layout::ColumnMajor,
complex<RealB>, layout::RowMajor,
complex<RealC>, LayoutC_,
arch::OpClassSimt,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = complex<RealA>;
using LayoutA = layout::ColumnMajor;
using ElementB = complex<RealB>;
using LayoutB = layout::RowMajor;
using ElementC = complex<RealC>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of access
static int const kAccessSizeInBits = sizeof_bits<ElementA>::value;
/// No vectorized accesses
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4; // TODO need to extract these from template data
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
  // LaneM and LaneN are additionally bounded above by the thread tile size
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
1, /// 1 partition along K dimension
kTransformA, /// Transform for A
kTransformB /// Transform for B
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>, // or Shape::kK / 32
WarpCount::kK>;
};
/// Partial specialization for complex SIMT operation
///
/// A: row-major
/// B: column-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
typename RealA,
typename RealB,
typename RealC,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<1, 1, 1>,
complex<RealA>, layout::RowMajor,
complex<RealB>, layout::ColumnMajor,
complex<RealC>, LayoutC_,
arch::OpClassSimt,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = complex<RealA>;
using LayoutA = layout::RowMajor;
using ElementB = complex<RealB>;
using LayoutB = layout::ColumnMajor;
using ElementC = complex<RealC>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of access
static int const kAccessSizeInBits = sizeof_bits<ElementA>::value;
/// No vectorized accesses
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4; // TODO need to extract these from template data
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
  // LaneM and LaneN are additionally bounded above by the thread tile size
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
1, /// 1 partition along K dimension
kTransformA, /// Transform for A
kTransformB /// Transform for B
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<Shape::kK / 32, 0>,
MatrixShape<0, Shape::kK / 32>,
WarpCount::kK>;
};
/// Partial specialization for complex SIMT operation
///
/// A: row-major
/// B: row-major
/// Operator: arch::OpMultiplyAddComplex or arch::OpMultiplyGaussianComplex
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
typename RealA,
typename RealB,
typename RealC,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMultistageMmaComplexCore<
Shape_, WarpShape_, GemmShape<1, 1, 1>,
complex<RealA>, layout::RowMajor,
complex<RealB>, layout::RowMajor,
complex<RealC>, LayoutC_,
arch::OpClassSimt,
Stages,
TransformA, TransformB,
Operator_,
CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = complex<RealA>;
using LayoutA = layout::RowMajor;
using ElementB = complex<RealB>;
using LayoutB = layout::RowMajor;
using ElementC = complex<RealC>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using Operator = Operator_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of access
static int const kAccessSizeInBits = sizeof_bits<ElementA>::value;
/// No vectorized accesses
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
  /// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4; // TODO need to extract these from template data
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
  // LaneM and LaneN are additionally bounded above by the thread tile size
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
1, /// 1 partition along K dimension
kTransformA, /// Transform for A
kTransformB /// Transform for B
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<Shape::kK / 32, 0>,
MatrixShape<0, 0>, // or Shape::kK / 32
WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 65,201 | C | 35.043118 | 101 | 0.690572 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines basic properties needed by CTA-level GEMMs, given assumptions
    about the data layout of the global memory fragments, data types, and
    internal tile sizes.
    Partial specializations for threadblock::Mma operations targeting TensorOp
    instructions.
    The SM80 multistage kernel requires the number of stages to be greater than
    or equal to 3 in order to use asynchronous copy (cp.async).
*/
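//
// Usage sketch (illustrative only, not part of this header): one possible way to
// instantiate the double-precision Tensor Op core defined below. The 128x128x16
// threadblock tile, 64x64x16 warp tile, and 8x8x4 instruction shape are assumed
// values chosen for illustration.
//
//   using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 16>,   // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 16>,     // warp tile
//       cutlass::gemm::GemmShape<8, 8, 4>,        // instruction shape (SM80 DMMA)
//       double, cutlass::layout::ColumnMajor,     // operand A
//       double, cutlass::layout::RowMajor,        // operand B
//       double, cutlass::layout::RowMajor,        // accumulator C
//       cutlass::arch::OpClassTensorOp,
//       3,                                        // stages >= 3 enables cp.async
//       cutlass::arch::OpMultiplyAdd>;
//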
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/gemm/threadblock/mma_multistage.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::ColumnMajor, double, layout::ColumnMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::ColumnMajor;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
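  // For example (values assumed for illustration only): Shape = 128x128x16 with
  // WarpShape = 64x64x16 gives WarpCount = <2, 2, 1>, i.e. 4 warps and
  // kThreads = 4 * 32 = 128 threads per threadblock.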
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
/// Partial specialization for double-precision
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::ColumnMajor, double, layout::RowMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::ColumnMajor;
using ElementB = double;
using LayoutB = layout::RowMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::RowMajor, double, layout::ColumnMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::RowMajor;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise;
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for double-precision
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::RowMajor, double, layout::RowMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::RowMajor;
using ElementB = double;
using LayoutB = layout::RowMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise;
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<16, 2>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::AffineRank2ColumnMajor, double, layout::AffineRank2ColumnMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = double;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
arch::OpClassTensorOp,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/// Partial specialization for double-precision
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::AffineRank2ColumnMajor, double, layout::AffineRank2RowMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = double;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
arch::OpClassTensorOp,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::AffineRank2RowMajor, double, layout::AffineRank2ColumnMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = double;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
arch::OpClassTensorOp,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for double-precision
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double,
layout::AffineRank2RowMajor, double, layout::AffineRank2RowMajor, double,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = double;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = double;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = double;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
arch::OpClassTensorOp,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
  /// ThreadMap of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float-precision
///
/// ElementA: complex<float>
/// ElementB: complex<float>
/// ElementC: complex<float>
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Layout for A operand
typename LayoutA_,
/// Layout for B operand
typename LayoutB_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA_,
/// per-element transformation for elements of B
ComplexTransform TransformB_
>
struct DefaultMmaCore<
Shape_, WarpShape_, GemmShape<16, 8, 8>,
complex<float>, LayoutA_,
complex<float>, LayoutB_,
complex<float>, LayoutC_,
arch::OpClassTensorOp,
Stages,
Operator_,
false,
CacheOpA,
CacheOpB,
TransformA_, TransformB_, true> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<16, 8, 8>;
using ElementA = complex<float>;
using LayoutA = LayoutA_;
using ElementB = complex<float>;
using LayoutB = LayoutB_;
using ElementC = complex<float>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
static const ComplexTransform TransformA = TransformA_;
static const ComplexTransform TransformB = TransformB_;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
static_assert(
platform::is_same<Operator, arch::OpMultiplyAddComplex>::value ||
platform::is_same<Operator, arch::OpMultiplyAddGaussianComplex>::value ||
platform::is_same<Operator, arch::OpMultiplyAddComplexFastF32>::value,
"The operator tag must indicate complex multiplication.");
//
// Underlying template
//
using MmaComplexCore = DefaultMultistageMmaComplexCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
arch::OpClassTensorOp,
kStages,
TransformA,
TransformB,
Operator,
kCacheOpA,
kCacheOpB
>;
//
// Shared memory layouts
//
using SmemLayoutA = typename MmaComplexCore::SmemLayoutA;
// Shared memory layout
using SmemLayoutB = typename MmaComplexCore::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename MmaComplexCore::SmemIteratorA;
/// ThreadMap of iterator B
using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename MmaComplexCore::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename MmaComplexCore::MmaTensorOp;
/// Policy used to define MmaPipelined
using MmaPolicy = typename MmaComplexCore::MmaPolicy;
};
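// Illustrative instantiation sketch for the complex<float> specialization above.
// The tile sizes, cache-operation kinds, and transforms are assumptions chosen
// for this example, not requirements of the template.
//
//   using ComplexMmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<64, 64, 16>,     // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 16>,     // warp tile
//       cutlass::gemm::GemmShape<16, 8, 8>,       // instruction shape
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,   // operand A
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,   // operand B
//       cutlass::complex<float>, cutlass::layout::RowMajor,      // accumulator C
//       cutlass::arch::OpClassTensorOp,
//       3,                                        // multistage (cp.async)
//       cutlass::arch::OpMultiplyAddComplex,
//       false,                                    // matches the 'false' flag in the specialization
//       cutlass::arch::CacheOperation::Always,    // cache op for A
//       cutlass::arch::CacheOperation::Always,    // cache op for B
//       cutlass::ComplexTransform::kNone,         // transform on A
//       cutlass::ComplexTransform::kConjugate,    // transform on B
//       true>;                                    // matches the trailing 'true' in the specialization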
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for double-precision
///
/// ElementA: complex<double>
/// ElementB: complex<double>
/// ElementC: complex<double>
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Layout for A operand
typename LayoutA_,
/// Layout for B operand
typename LayoutB_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA_,
/// per-element transformation for elements of B
ComplexTransform TransformB_
>
struct DefaultMmaCore<
Shape_, WarpShape_, InstructionShape_,
complex<double>, LayoutA_,
complex<double>, LayoutB_,
complex<double>, LayoutC_,
arch::OpClassTensorOp,
Stages,
Operator_,
false,
CacheOpA,
CacheOpB,
TransformA_, TransformB_, true> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = complex<double>;
using LayoutA = LayoutA_;
using ElementB = complex<double>;
using LayoutB = LayoutB_;
using ElementC = complex<double>;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
static const ComplexTransform TransformA = TransformA_;
static const ComplexTransform TransformB = TransformB_;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
static_assert(WarpCount::kCount > 1,
"This specialization requires at least two warps.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 64;
/// Default Operator
using Operator = Operator_;
static_assert(
platform::is_same<Operator, arch::OpMultiplyAddComplex>::value ||
platform::is_same<Operator, arch::OpMultiplyAddGaussianComplex>::value,
"The operator tag must indicate complex multiplication.");
//
// Underlying template
//
using MmaComplexCore = DefaultMultistageMmaComplexCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
arch::OpClassTensorOp,
kStages,
TransformA,
TransformB,
Operator,
kCacheOpA,
kCacheOpB
>;
//
// Shared memory layouts
//
using SmemLayoutA = typename MmaComplexCore::SmemLayoutA;
// Shared memory layout
using SmemLayoutB = typename MmaComplexCore::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename MmaComplexCore::SmemIteratorA;
/// ThreadMap of iterator B
using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename MmaComplexCore::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename MmaComplexCore::MmaTensorOp;
/// Policy used to define MmaPipelined
using MmaPolicy = typename MmaComplexCore::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, int(128 / sizeof(ElementA))>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, int(128 / sizeof(ElementB))>;
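  // For example (assuming ElementA = ElementB = cutlass::half_t, an illustrative
  // choice): sizeof_bits<half_t>::value = 16 and 128 / sizeof(half_t) = 64, so
  // SmemLayoutA resolves to ColumnMajorTensorOpMultiplicandCongruous<16, 64> and
  // SmemLayoutB to RowMajorTensorOpMultiplicandCongruous<16, 64>.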
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
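// Illustrative sketch (not part of the original header): a hypothetical
// instantiation of the specialization above. The tile sizes, element types,
// and math operator below are assumptions chosen only to show how the
// template parameters line up; they are not prescribed by this file.
//
//   using ExampleCore = DefaultMmaCore<
//       GemmShape<128, 128, 32>,        // threadblock tile
//       GemmShape<64, 64, 32>,          // warp tile
//       GemmShape<16, 8, 16>,           // instruction shape
//       half_t, layout::ColumnMajor,    // A operand
//       half_t, layout::RowMajor,       // B operand
//       float, layout::RowMajor,        // accumulator
//       arch::OpClassTensorOp,
//       3,                              // stages
//       arch::OpMultiplyAdd>;
//
//   // ExampleCore::MmaPolicy then parameterizes a threadblock-scoped
//   // multistage mainloop.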
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
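  // For example (illustrative numbers only, not a requirement of this file):
  // with 16-bit operands such as half_t, a 128-bit access holds 8 elements,
  // so a threadblock tile with Shape::kK == 32 gives a 4 x 8
  // (contiguous x strided) arrangement of the 32 threads in a warp.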
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, Stages,
Operator_, false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousB =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedB =
kWarpSize / kWarpThreadArrangementContiguousB;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value, int(128 / sizeof(ElementA))>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousB,
kWarpThreadArrangementStridedB>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kWarpThreadArrangementContiguousA =
Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value);
static int const kWarpThreadArrangementStridedA =
kWarpSize / kWarpThreadArrangementContiguousA;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<ElementB>::value, int(128 / sizeof(ElementB))>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads,
layout::PitchLinearShape<kWarpThreadArrangementContiguousA,
kWarpThreadArrangementStridedA>,
kAccessSizeInBits / sizeof_bits<ElementA>::value>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major-interleaved
/// B: row-major-interleaved
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
///
/// Column/RowMajorInterleaved<InterleavedK>(m, n) is mapped to Column/RowMajor(m
/// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators
/// can be reused. The shared store iterator is the same as the crosswise shared
/// store iterator. So, the only thing we need to do is to swap the coordinates
/// (contiguous <=> strided) used by the global iterator and the shared store
/// iterator.
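/// For instance (illustrative numbers only): with InterleavedK == 32, the A
/// tile of extent (Shape::kM, Shape::kK) is read through a pitch-linear view
/// of shape (Shape::kM * 32, Shape::kK / 32), which is exactly the
/// PitchLinearShape used by IteratorThreadMapA below; the shared-memory store
/// then swaps the contiguous and strided coordinates via
/// TransposePitchLinearThreadMap.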
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator_,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Number of interleaved K
int InterleavedK>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajorInterleaved<InterleavedK>, ElementB_,
layout::RowMajorInterleaved<InterleavedK>, ElementC_,
LayoutC_, arch::OpClassTensorOp, Stages, Operator_,
AccumulatorsInRowMajor, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementB = ElementB_;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static int const kInterleavedK = InterleavedK;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess =
kAccessSizeInBits / sizeof_bits<ElementA>::value;
static int const kWarpThreadArrangementContiguous =
kInterleavedK / kElementsPerAccess;
static int const kWarpThreadArrangementStrided =
kWarpSize / kWarpThreadArrangementContiguous;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, kInterleavedK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, kInterleavedK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM * kInterleavedK,
Shape::kK / kInterleavedK>,
kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMap<
IteratorThreadMapA,
layout::PitchLinearShape<kWarpThreadArrangementContiguous,
kWarpThreadArrangementStrided>>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN * kInterleavedK,
Shape::kK / kInterleavedK>,
kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMap<
IteratorThreadMapB,
layout::PitchLinearShape<kWarpThreadArrangementContiguous,
kWarpThreadArrangementStrided>>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB,
ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>,
MatrixShape<0, 0>, WarpCount::kK>;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
// Shared memory layout
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4; // TODO need to extract these from template data
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
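  // Worked example (illustrative assumption, not mandated here): for float
  // operands with WarpShape 64x64, ThreadTileM = 64 / 4 = 16 and
  // ThreadTileN = 64 / 8 = 8, numElementsA = numElementsB = 128 / 32 = 4,
  // so LaneM = LaneN = 4 and LaneLayout = 2.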
static_assert(!((Shape::kK / 32) % LaneN),
"Padding must be divisible by Lane");
// these should have max of thread tile also
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, Shape::kK / 32>,
WarpCount::kK>;
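  // Note: the MatrixShape<0, Shape::kK / 32> argument requests Shape::kK / 32
  // columns of shared-memory padding for operand B (operand A is unpadded in
  // this specialization). Padding of this form is commonly used to stagger
  // accesses and reduce shared-memory bank conflicts; that rationale is an
  // inference from the policy definition, not something stated in this file.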
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
// Shared memory layout
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
IteratorThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4; // TODO need to extract these from template data
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
// these should have max of thread tile also
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK>;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
// Shared memory layout
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
SmemThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4; // TODO need to extract these from template data
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static_assert(!((Shape::kK / 32) % LaneM) && !((Shape::kK / 32) % LaneN),
"Padding must be divisible by Lane");
// these should have max of thread tile also
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<Shape::kK / 32, 0>,
MatrixShape<0, Shape::kK / 32>,
WarpCount::kK>;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Number of warps present
using WarpCount = GemmShape<Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size.");
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Default Operator
using Operator = Operator_;
// Warp thread arrangement
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
// Shared memory layout
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0,
SmemThreadMapA>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIterator<
MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1,
IteratorThreadMapB>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = 4; // TODO need to extract these from template data
static const int WarpNumThreadsN = 8;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static_assert(!((Shape::kK / 32) % LaneM),
"Padding must be divisible by Lane");
// these should have max of thread tile also
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<Shape::kK / 32, 0>,
MatrixShape<0, 0>,
WarpCount::kK>;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
arch::OpClassSimt,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
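  // The affine rank-2 layouts change only how global memory is addressed;
  // shared-memory staging and the warp-level operator are unchanged, so the
  // nested types below are inherited directly from the canonical
  // ColumnMajor x RowMajor specialization via Base.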
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
arch::OpClassSimt,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
arch::OpClassSimt,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/// Partial specialization for SIMT GEMMs using multistage pipeline.
///
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Number of stages
int Stages,
/// Operation performed by Simt
typename Operator_,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB>
struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_,
layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, Stages, Operator_,
false, CacheOpA, CacheOpB> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
static int const kStages = Stages;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
arch::OpClassSimt,
kStages,
Operator,
false,
kCacheOpA,
kCacheOpB>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 103,000 | C | 34.310593 | 101 | 0.661854 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_gemv_core.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level batched GEMV assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting SIMT instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/gemm/threadblock/gemv.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/// Template defining default vector-matrix multiply operators inferred from threadblock tile size
/// and global memory data layout.
template <
typename Shape_, /// Shape of the threadblock vector-matrix multiply operator
typename ThreadShape_, /// Shape of per-thread vector-matrix multiply operator
typename ElementA_, /// Element data type of A operand
typename LayoutA_, /// Layout of operand A
typename ElementB_, /// Element data type of B operand
typename LayoutB_, /// Layout of operand B
typename ElementC_, /// Data type of accumulator
typename LayoutC_ /// Layout of accumulator
>
struct DefaultGemvCore {
using Shape = Shape_;
using ThreadShape = ThreadShape_;
using LayoutA = LayoutA_;
using LayoutB = LayoutB_;
using LayoutC = LayoutC_;
using ElementA = ElementA_;
using ElementB = ElementB_;
using ElementC = ElementC_;
static int const kThreadsPerN = Shape::kN / ThreadShape::kN;
using IteratorPolicyA = typename platform::conditional<
platform::is_same<LayoutA, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kK, Shape::kM>, 1, ThreadShape::kK>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kM, Shape::kK>, 1, ThreadShape::kM>>::type;
using IteratorA = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kM, Shape::kK>, ElementA, LayoutA, 1, IteratorPolicyA>;
using IteratorPolicyB = typename platform::conditional<
platform::is_same<LayoutB, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreadsPerN, ThreadShape::kK>>::type;
using IteratorB = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kK, Shape::kN>, ElementB, LayoutB, 0, IteratorPolicyB>;
using IteratorPolicyC = typename platform::conditional<
platform::is_same<LayoutC, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<Shape::kN, Shape::kM>, kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<Shape::kM, Shape::kN>, kThreadsPerN, ThreadShape::kM>>::type;
using IteratorC = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC, 0, IteratorPolicyC>;
using MmaSimtOp = typename cutlass::gemm::thread::Mma<
cutlass::gemm::GemmShape<ThreadShape::kM, ThreadShape::kN, Shape::kK>,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC>;
using Operator = MmaSimtOp;
// Assertions for correctness
static_assert((Shape::kM == 1), "M=1 is required for GEMV");
static_assert((ThreadShape::kM == 1), "M=1 is required for GEMV");
static_assert(Shape::kK % ThreadShape::kK == 0, "Shape::K must be a multiple of ThreadShape::K");
static_assert(((ThreadShape::kK == 1) ||
(ThreadShape::kK == 2) ||
(ThreadShape::kK == 4) ||
(ThreadShape::kK == 8) ||
(ThreadShape::kK == 16) ||
(ThreadShape::kK == 32)
),
"ThreadShape::K must be a 1, 2, 4, 8, 16 or 32");
};
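// Illustrative sketch (not part of the original header): a hypothetical
// instantiation of DefaultGemvCore that satisfies the assertions above. The
// tile shapes and element types are assumptions for illustration only.
//
//   using ExampleGemvCore = DefaultGemvCore<
//       GemmShape<1, 64, 64>,       // threadblock tile: 1 x 64, K = 64
//       GemmShape<1, 4, 4>,         // per-thread tile (kThreadsPerN = 16)
//       float, layout::RowMajor,    // A (the vector)
//       float, layout::ColumnMajor, // B (the matrix)
//       float, layout::RowMajor>;   // C / accumulator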
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 6,979 | C | 44.921052 | 116 | 0.636767 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming
expectations about data layout of the global memory fragments, data types,
and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp
instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Template defining default matrix multiply operators inferred from
/// threadblock tile size, global memory data layout, and target math
/// instruction.
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Complex transformation on operand A
ComplexTransform TransformA,
/// Complex transformation on operand B
ComplexTransform TransformB,
/// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator = arch::OpMultiplyAddComplex,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global>
struct DefaultMultistageMmaComplexCore;
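// Illustrative sketch (editorial addition, not part of the original header): the
// primary template above is only declared here; concrete configurations resolve to
// partial specializations supplied by architecture-specific headers. A hypothetical
// complex<float> TensorOp instantiation might look like the following, assuming a
// matching partial specialization is available for the chosen shapes:
//
//   using Core = cutlass::gemm::threadblock::DefaultMultistageMmaComplexCore<
//       cutlass::gemm::GemmShape<64, 64, 16>,          // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 16>,          // warp tile
//       cutlass::gemm::GemmShape<16, 8, 8>,            // instruction shape
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,
//       cutlass::arch::OpClassTensorOp,
//       3,                                             // stages
//       cutlass::ComplexTransform::kNone,
//       cutlass::ComplexTransform::kNone,
//       cutlass::arch::OpMultiplyAddComplex>;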
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 4,959 | C | 40.333333 | 100 | 0.67433 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting simt instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/threadblock/mma_pipelined.h"
#include "cutlass/gemm/threadblock/mma_singlestage.h"
#include "cutlass/arch/cache_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a threadblock-scoped access
int kAccessSizeInBits = -1, // -1 denoting the default
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DefaultMmaCoreWithAccessSize;
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
    /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA,
/// per-element transformation for elements of B
ComplexTransform TransformB,
bool IsComplex
>
struct DefaultMmaCoreWithAccessSize<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, -1, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> : DefaultMmaCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> {};
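// Editorial note (not part of the original header): a kAccessSizeInBits value of -1
// simply forwards to DefaultMmaCore, so the following hypothetical alias derives from
// the corresponding DefaultMmaCore with identical arguments (minus the access-size
// parameter) and adds nothing further:
//
//   using CoreWithDefaultAccess = cutlass::gemm::threadblock::DefaultMmaCoreWithAccessSize<
//       cutlass::gemm::GemmShape<128, 128, 8>,   // threadblock tile
//       cutlass::gemm::GemmShape<32, 64, 8>,     // warp tile
//       cutlass::gemm::GemmShape<1, 1, 1>,       // SIMT instruction shape
//       float, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassSimt,
//       -1>;                                     // default access size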
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a threadblock-scoped access (a value of -1 indicates the default)
int kAccessSizeInBits_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCoreWithAccessSize<Shape_, WarpShape_, typename std::enable_if<kAccessSizeInBits_ != -1, GemmShape<1, 1, 1>>::type, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, kAccessSizeInBits_, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccessDefault = 1;
static_assert(kAccessSizeInBits_ == -1 ||
sizeof_bits<ElementA>::value == sizeof_bits<ElementB>::value ||
kAccessSizeInBits_ / sizeof_bits<ElementA>::value == kElementsPerAccessDefault,
"Non-default value for kAccessSizeInBits_ is only allowed if size(elementA) == sizeof(elementB)");
static int const kElementsPerAccess = (kAccessSizeInBits_ != -1) ? kAccessSizeInBits_ / sizeof_bits<ElementA>::value : kElementsPerAccessDefault;
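  // Worked example (editorial addition): with ElementA = float (32 bits) and
  // kAccessSizeInBits_ = 128, kElementsPerAccess evaluates to 128 / 32 = 4. With the
  // default kAccessSizeInBits_ = -1, it remains kElementsPerAccessDefault = 1.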
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
  // LaneM/LaneN are already clamped to the thread tile extents (ThreadTileM/ThreadTileN) above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 12,645 | C | 37.43769 | 147 | 0.670225 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Implements the Stream-K threadblock mapping of blockIdx to GEMM problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/gemm/threadblock/index_remat.h"
#include <iostream>
#include "cutlass/core_io.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock mapping control for GEMMs
struct ThreadblockSwizzleStreamK {
/// Advertise StreamkFeature
using StreamkFeature = void;
/// Kernel traits
template <typename GemmKernel>
struct KernelTraits {};
/// Reduction strategy
enum ReductionStrategy
{
kNone, // Data-parallel strategy (no seams, fixup, etc.)
kAtomic, // Non-deterministic reduction of SK-block partials using atomic aggregation in L2
kMixed, // Deterministic reduction of SK-block partials employing either:
              //   (a) A separate wave of reduction thread blocks (for scenarios with lots of
// SK-blocks per SK-tile)
// (b) Turnstile-ordered atomic aggregation in L2 (for scenarios with few
// SK-blocks per SK-tile)
};
static ReductionStrategy const kReductionStrategy = kMixed;
//
// Heuristics
//
/// Data-parallel wave-quantization efficiency threshold (above which we go data-parallel)
static float constexpr kDpEfficiencyThreshold = 0.92f;
/// Minimum number of MAC-iterations per streamk block
static int const kMinItersPerSkBlock = 2;
/// Height in CTAs of a grid rasterization cohort
static int const kCohortCtasM = 8;
/// Width in CTAs of a grid rasterization cohort
static int const kCohortCtasN = 4;
/// Number of CTAs per cohort
static int const kCtasPerCohort = kCohortCtasN * kCohortCtasM;
/// Cost-equivalent number of SM-iterations for fixup I/O
static int const kFixupStartupIterEquiv = 10;
static int const kFixupPeerIterEquiv = 3;
//
// Member state
//
/// The 3D value-extents of the GEMM computation volume (m,n,k)
GemmCoord problem_size;
/// The 2D tile-extents of the output matrix (m,n)
GemmCoord tiled_shape;
/// Number of iterations per output tile
int iters_per_tile;
/// Number of reduction blocks in the grid
int reduction_blocks;
int dp_blocks; /// Number of data-parallel thread blocks in the grid
int dp_first_wave_tiles; /// Number of output tiles each CTA in the first DP wave will produce
  int sk_tiles;                    /// Number of output tiles processed cooperatively by SK-blocks
  int sk_regions;                  /// Number of independent regions of the SK iteration space
  int sk_blocks_per_region;        /// Number of SK-blocks per region
  int sk_big_blocks_per_region;    /// Number of SK-blocks per region that run one extra iteration
  int sk_iters_per_region;         /// Number of MAC-iterations per region
int sk_iters_per_normal_block; /// Number of iterations for normal SK-blocks
int sk_waves; /// Number of SK waves in the grid
/// CTA occupancy per SM
int sm_occupancy;
/// Number of SMs for dispatch heuristics to load-balance using Stream-K CTAs (wave size)
int avail_sms;
/// Whether to perform cohort CTA rasterization
bool cohort_raster;
/// Div/mod accelerators
struct
{
FastDivmod tiled_shape_m;
FastDivmod tiled_shape_n;
FastDivmod tiled_cohort_shape_n;
FastDivmod iters_per_tile;
FastDivmod sk_iters_per_normal_block;
FastDivmod sk_iters_per_big_block;
FastDivmod sk_iters_per_region;
FastDivmod sk_blocks_per_region;
FastDivmod sm_occupancy;
} div_mod;
//
// Host+device interface
//
/// Constructor
CUTLASS_HOST_DEVICE
ThreadblockSwizzleStreamK() {}
//
// Host-side interface
//
/// Debug print
void Print()
{
#ifndef __CUDA_ARCH__
int tiles = tiled_shape.m() * tiled_shape.n();
std::cout <<
"problem_size: (" << problem_size.m() << "," << problem_size.n() << ")" <<
", reduction_blocks: " << reduction_blocks <<
", dp_blocks: " << dp_blocks <<
", sk_blocks_per_region: " << sk_blocks_per_region <<
", sk_regions: " << sk_regions <<
", sk_iters_per_normal_block: " << sk_iters_per_normal_block <<
", sk_big_blocks_per_region: " << sk_big_blocks_per_region <<
", dp_first_wave_tiles: " << dp_first_wave_tiles <<
", tiled_shape: (" << tiled_shape.m() << "," << tiled_shape.n() << ")" <<
", tiles: " << tiles <<
", iters_per_tile: " << iters_per_tile <<
", dp_tiles: " << tiles - sk_tiles <<
", sk_tiles: " << sk_tiles <<
", avail_sms: " << avail_sms <<
", sm_occupancy: " << sm_occupancy <<
", avail_sms: " << avail_sms <<
", cohort_raster: " << cohort_raster <<
"\n\n";
#endif
}
// Compute sk_blocks to dispatch for a given number of sk_tiles
static void get_sk_blocks(
int &sk_blocks, /// [out]
int &savings_iters, /// [out]
int sk_tiles,
int iters_per_tile,
int avail_sms,
int max_sk_occupancy,
bool allow_partial_wave)
{
savings_iters = INT_MIN;
sk_blocks = 0;
if (sk_tiles == 0) {
return;
}
int sk_iters = sk_tiles * iters_per_tile;
int dp_equiv_waves = (sk_tiles + avail_sms - 1) / avail_sms;
int dp_equiv_iters = iters_per_tile * dp_equiv_waves;
int min_sk_blocks = (allow_partial_wave) ? fast_min(avail_sms, sk_tiles + 1) : avail_sms;
int max_sk_blocks = fast_min(avail_sms * max_sk_occupancy, sk_iters / kMinItersPerSkBlock);
for (int trial_sk_blocks = min_sk_blocks; trial_sk_blocks <= max_sk_blocks; ++trial_sk_blocks)
{
int sk_waves = (trial_sk_blocks + avail_sms - 1) / avail_sms;
int max_sk_iters_per_block = (sk_iters + trial_sk_blocks - 1) / trial_sk_blocks;
int sk_iter_equiv = max_sk_iters_per_block * sk_waves;
int num_peers = ((trial_sk_blocks + sk_tiles - 1) / sk_tiles) + 1; // add one for alignment skew
float iter_cost = 0.02f * float(num_peers) * float(sk_iter_equiv);
if (trial_sk_blocks % sk_tiles == 0)
{
// aligned
num_peers = (trial_sk_blocks / sk_tiles);
iter_cost = 0.0f;
}
float peer_cost = 2.0f * float(num_peers);
float base_cost = 2.0f * float(sk_waves);
int fixup_iter_equiv = int(base_cost + iter_cost + peer_cost);
int trial_savings_iters = dp_equiv_iters - sk_iter_equiv - fixup_iter_equiv;
if (trial_savings_iters >= savings_iters) {
savings_iters = trial_savings_iters;
sk_blocks = trial_sk_blocks;
}
}
}
/// Determine the populations of DP and SK blocks to invoke for the given number of output tiles
static void get_blocks(
int &dp_tiles, /// [out]
int &sk_blocks, /// [out]
int output_tiles,
int iters_per_tile,
int avail_sms,
int sm_occupancy)
{
int full_waves = output_tiles / avail_sms;
int full_wave_tiles = full_waves * avail_sms;
int partial_wave_tiles = output_tiles - full_wave_tiles;
int score = -1;
dp_tiles = output_tiles;
sk_blocks = 0;
if (partial_wave_tiles == 0)
{
// Perfect quantization
return;
}
if (full_waves < sm_occupancy)
{
// We're less than full GPU occupancy
// Form the SK wave from the partial wave to get us up to full GPU occupancy
int max_sk_occupancy = sm_occupancy - full_waves;
dp_tiles = full_wave_tiles;
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles,
iters_per_tile,
avail_sms,
max_sk_occupancy,
true); // we can run with less than a full wave of SK-blocks
if (score < 0) {
// not profitable
sk_blocks = 0;
dp_tiles = output_tiles;
}
return;
}
// We're at (or greater) than GPU occupancy
if (full_waves % sm_occupancy == sm_occupancy - 1)
{
// Form the SK wave from the partial wave to get us to full GPU occupancy
int max_sk_occupancy = 1;
dp_tiles = full_wave_tiles;
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles,
iters_per_tile,
avail_sms,
max_sk_occupancy,
true); // we can run with less than a full wave of SK-blocks
if (score >= 0) {
return;
}
}
// Form the SK wave by combining the last full wave and the partial wave
// We're less than full GPU occupancy
dp_tiles = full_wave_tiles - avail_sms;
int max_sk_occupancy = sm_occupancy - ((full_waves - 1) % sm_occupancy);
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles + avail_sms,
iters_per_tile,
avail_sms,
max_sk_occupancy,
false); // we cannot run with less than a full wave of SK-blocks
if (score < 0) {
// not profitable
sk_blocks = 0;
dp_tiles = output_tiles;
}
}
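  // Illustrative host-side sketch (editorial addition, not part of the original
  // header): the decomposition heuristic can be queried directly; all numbers below
  // are hypothetical.
  //
  //   int dp_tiles, sk_blocks;
  //   ThreadblockSwizzleStreamK::get_blocks(
  //       dp_tiles, sk_blocks,
  //       /* output_tiles = */ 100,
  //       /* iters_per_tile = */ 64,
  //       /* avail_sms = */ 108,
  //       /* sm_occupancy = */ 2);
  //   // dp_tiles output tiles are produced by data-parallel CTAs; the remaining
  //   // tiles (if any) are covered cooperatively by sk_blocks Stream-K CTAs.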
/// Constructor: *Gemm* problem size (m, n, k)
template <typename GemmKernel>
ThreadblockSwizzleStreamK(
KernelTraits<GemmKernel> const kernel_traits_,
GemmUniversalMode const mode_,
GemmCoord const problem_size_,
GemmCoord const tile_size_,
int const batch_count_, /// Batch count (when mode_ == GemmUniversalMode::kBatched) or split-K-override splitting factor (when mode_ == GemmUniversalMode::kGemm)
int const sm_occupancy_,
int const avail_sms_)
:
problem_size(problem_size_),
tiled_shape(
(problem_size.m() + tile_size_.m() - 1) / tile_size_.m(),
(problem_size.n() + tile_size_.n() - 1) / tile_size_.n(),
(mode_ == GemmUniversalMode::kBatched) ? batch_count_ : 1),
iters_per_tile((problem_size.k() + tile_size_.k() - 1) / tile_size_.k()),
reduction_blocks(0),
dp_blocks(0),
dp_first_wave_tiles(1), // Default: one tile per DP-block in the first wave of DP blocks
sk_tiles(0),
sk_regions(1), // Default: a single region of iteration space (across all SK tiles)
sk_blocks_per_region(0),
sk_big_blocks_per_region(0),
sk_iters_per_region(0),
sk_iters_per_normal_block(0),
sk_waves(0),
sm_occupancy(sm_occupancy_),
avail_sms(fast_max(1, avail_sms_)),
cohort_raster(false)
{
size_t problem_bytes =
(sizeof(typename GemmKernel::ElementC) * problem_size.m() * problem_size.n()) +
(sizeof(typename GemmKernel::ElementA) * problem_size.m() * problem_size.k()) +
(sizeof(typename GemmKernel::ElementB) * problem_size.k() * problem_size.n());
size_t problem_flops = size_t(problem_size.m()) * size_t(problem_size.n()) * size_t(problem_size.k()) * 2;
float flops_per_byte = float(problem_flops) / float(problem_bytes);
int gpu_occupancy = avail_sms * sm_occupancy;
int output_tiles = tiled_shape.m() * tiled_shape.n();
int waves = (output_tiles + avail_sms - 1) / avail_sms;
float dp_efficiency = float(output_tiles) / float(waves * avail_sms);
//
// Determine dispatch composition of DP-tiles and SK-blocks
//
// Start with a DP-only configuration
int dp_tiles = output_tiles; // Number of data-parallel tiles
int sk_blocks = 0; // Number of thread blocks to produce the remaining SK tiles
// kGemm mode allows for SK load balancing
if (mode_ == GemmUniversalMode::kGemm)
{
if (batch_count_ > 1)
{
// Split-K override
dp_tiles = 0;
sk_blocks = output_tiles * batch_count_;
}
else if ((kReductionStrategy != kNone) && // Load-balancing strategy statically enabled
(avail_sms > 1)) // Plurality of SMs to load balance across
{
// Use heuristics
get_blocks(
dp_tiles, /// [out]
sk_blocks, /// [out]
output_tiles,
iters_per_tile,
avail_sms,
sm_occupancy);
}
}
sk_tiles = output_tiles - dp_tiles;
// Compute SK block iteration details
if (sk_blocks > 0)
{
sk_waves = (sk_blocks + avail_sms - 1) / avail_sms;
int sk_iters = sk_tiles * iters_per_tile;
sk_blocks = fast_min(sk_blocks, sk_iters);
sk_iters_per_normal_block = sk_iters / sk_blocks;
int extra_sk_iters = sk_iters - (sk_iters_per_normal_block * sk_blocks);
int sk_big_blocks = extra_sk_iters;
if ((sk_blocks > sk_tiles) && (sk_blocks % sk_tiles == 0))
{
// Split-K decomposition
sk_regions = sk_tiles;
}
sk_blocks_per_region = sk_blocks / sk_regions;
sk_big_blocks_per_region = sk_big_blocks / sk_regions;
sk_iters_per_region = sk_iters / sk_regions;
div_mod.sk_iters_per_normal_block = FastDivmod(sk_iters_per_normal_block);
div_mod.sk_iters_per_big_block = FastDivmod(sk_iters_per_normal_block + 1);
div_mod.sk_iters_per_region = FastDivmod(sk_iters_per_region);
div_mod.sk_blocks_per_region = FastDivmod(sk_blocks_per_region);
// Separate reduction heuristic
if ((kReductionStrategy == kMixed) &&
(sk_blocks > 2 * sk_tiles)) // Use a separate reduction wave whenever we would have more than three
// peers working on an SK tile. (This occurs when the ratio of SK-blocks
// to SK-tiles > 2, as a single tile may be covered by four SK-blocks,
                                                   // e.g., [partial-block | block | block | partial-block]).  With three or
                                                   // fewer peers, the two non-finishing SK-blocks are not expected to contend.
{
// Launch a reduction block every accumulator fragment in each SK-tile
static const int kAccumulatorFragments = GemmKernel::Epilogue::kAccumulatorFragments;
reduction_blocks = sk_tiles * kAccumulatorFragments;
}
}
//
// Compute DP blocks
//
dp_blocks = dp_tiles;
cutlass::gemm::GemmCoord tiled_cohort_shape(
(tiled_shape.m() + kCohortCtasM - 1) / kCohortCtasM,
(tiled_shape.n() + kCohortCtasN - 1) / kCohortCtasN,
batch_count_);
int cohort_blocks = (tiled_cohort_shape.m() * tiled_cohort_shape.n()) * kCtasPerCohort;
float cohort_efficiency = float(dp_blocks) / float(cohort_blocks);
// Check if the SK tiles would be in cohorts that are in-bounds
bool sk_in_range = true;
if (sk_tiles > 0)
{
int last_sk_tile = sk_tiles - 1;
int cohort_tile_idx = last_sk_tile / kCtasPerCohort;
int cohort_grid_m = cohort_tile_idx / tiled_cohort_shape.n();
int cohort_grid_n = (cohort_grid_m > 0) ?
tiled_cohort_shape.n() - 1 :
cohort_tile_idx % tiled_cohort_shape.n();
if ((((cohort_grid_m + 1) * kCohortCtasM) >= tiled_shape.m()) ||
(((cohort_grid_n + 1) * kCohortCtasN) >= tiled_shape.n()))
{
sk_in_range = false;
}
}
// Decide if we're going to be doing cohort raster
if (sk_in_range &&
(dp_blocks >= gpu_occupancy) &&
(cohort_efficiency > 0.85f))
{
cohort_raster = true;
dp_blocks = cohort_blocks;
}
else if (sk_waves > 0)
{
// Update semi-persistence of first DP wave to ensure full grid wavesets
// (Only applies when there's an SK component and we're not doing blocked cohort rasterization)
int dp_tile_waves = (dp_tiles + avail_sms - 1) / avail_sms;
int full_dp_tile_waves = dp_tiles / avail_sms;
int waveset_excess = (sk_waves + dp_tile_waves) % sm_occupancy;
if (dp_first_wave_tiles + waveset_excess <= full_dp_tile_waves)
{
dp_first_wave_tiles += waveset_excess;
dp_blocks -= (waveset_excess * avail_sms);
}
}
// Setup fast-div/mod for device-side usage
div_mod.tiled_shape_m = FastDivmod(tiled_shape.m());
div_mod.tiled_shape_n = FastDivmod(tiled_shape.n());
div_mod.tiled_cohort_shape_n = FastDivmod(tiled_cohort_shape.n());
div_mod.iters_per_tile = FastDivmod(iters_per_tile);
div_mod.sm_occupancy = FastDivmod(sm_occupancy);
}
/// Constructor: *ImplicitGemm* Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
template <typename GemmKernel>
ThreadblockSwizzleStreamK(
KernelTraits<GemmKernel> kernel_traits_,
GemmUniversalMode mode_,
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size_,
GemmCoord tile_size_,
int batch_count_,
int sm_occupancy_,
int avail_sms_, /// When the below are defaulted, the number of SMs that dispatch heuristics will attempt to load-balance
int dp_tiles_ = -1, /// Dispatch override: number of output tiles to assign to independent, data-parallel CTAs
int sk_blocks_ = -1) /// Dispatch override: number of Stream-K CTAs for cooperatively processing the remaining output tiles
:
ThreadblockSwizzleStreamK(
kernel_traits_,
mode_,
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size_),
tile_size_,
batch_count_,
sm_occupancy_,
avail_sms_,
dp_tiles_,
sk_blocks_)
{}
/// Constructor: *ImplicitGemm* Conv3d problem size: conv_operator(NZPQK, NDHWC, KTRSC)
template <typename GemmKernel>
ThreadblockSwizzleStreamK(
KernelTraits<GemmKernel> kernel_traits_,
GemmUniversalMode mode_,
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv3dProblemSize const &problem_size_,
GemmCoord tile_size_,
int batch_count_,
int sm_occupancy_,
int avail_sms_, /// When the below are defaulted, the number of SMs that dispatch heuristics will attempt to load-balance
int dp_tiles_ = -1, /// Dispatch override: number of output tiles to assign to independent, data-parallel CTAs
int sk_blocks_ = -1) /// Dispatch override: number of Stream-K CTAs for cooperatively processing the remaining output tiles
:
ThreadblockSwizzleStreamK(
kernel_traits_,
mode_,
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size_),
tile_size_,
batch_count_,
sm_occupancy_,
avail_sms_,
dp_tiles_,
sk_blocks_)
{}
/// Obtains number of threadblocks per GEMM
int get_num_blocks() const
{
// int reduction_waves = (reduction_blocks + avail_sms - 1) / avail_sms;
// return ((sk_waves + reduction_waves) * avail_sms) + dp_blocks;
int work_blocks = (sk_waves * avail_sms) + dp_blocks + reduction_blocks;
if (work_blocks < avail_sms)
{
return work_blocks;
}
int gpu_occupancy = sm_occupancy * avail_sms;
int gpu_wavesets = (work_blocks + gpu_occupancy - 1) / gpu_occupancy;
return gpu_wavesets * gpu_occupancy;
}
/// Obtains grid extents in CTAs
dim3 get_grid_dims() const
{
return dim3(get_num_blocks(), 1, tiled_shape.k());
}
//
// Device-side interface
//
/// Obtains number of threadblocks per GEMM
CUTLASS_DEVICE
int device_num_blocks() const
{
return gridDim.x;
}
/// Obtains tile index for the given sk iteration
CUTLASS_DEVICE
int get_sk_tile_idx(int iter) const
{
return div_mod.iters_per_tile.div(iter);
}
/// Obtains the calling threadblock's tiled coordinates for the given tile index
CUTLASS_DEVICE
GemmCoord get_tile_offset(int tile_idx) const
{
int m, n;
if (cohort_raster)
{
// tiled cohort raster
int cohort_tile_idx = tile_idx / kCtasPerCohort;
int cohort_grid_m, cohort_grid_n;
div_mod.tiled_cohort_shape_n(cohort_grid_m, cohort_grid_n, cohort_tile_idx);
int block_idx_cohort = tile_idx % kCtasPerCohort;
int block_cohort_m = block_idx_cohort / kCohortCtasN;
int block_cohort_n = block_idx_cohort % kCohortCtasN;
m = (cohort_grid_m * kCohortCtasM) + block_cohort_m;
n = (cohort_grid_n * kCohortCtasN) + block_cohort_n;
}
else if (tiled_shape.m() < tiled_shape.n())
{
// column-major raster
div_mod.tiled_shape_m(n, m, tile_idx);
}
else
{
// row-major raster
div_mod.tiled_shape_n(m, n, tile_idx);
}
int block_idx_k = RematerializeBlockIdxZ();
return GemmCoord{m, n, block_idx_k};
}
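  // Worked example (editorial addition): with kCohortCtasM = 8, kCohortCtasN = 4 and
  // tiled_cohort_shape.n() == 2, a cohort-rasterized tile_idx of 37 yields
  // cohort_tile_idx = 1 (cohort_grid_m = 0, cohort_grid_n = 1) and
  // block_idx_cohort = 5 (block_cohort_m = 1, block_cohort_n = 1), hence (m, n) = (1, 5).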
/// Obtains calling threadblock's linear threadblock index
CUTLASS_DEVICE
int get_block_idx() const
{
int block_idx = RematerializeBlockIdxX();
int gpu_occupancy = avail_sms * sm_occupancy;
int num_blocks = device_num_blocks();
int dest_sm, dest_wave;
div_mod.sm_occupancy(dest_sm, dest_wave, block_idx);
int remapped_block_idx = dest_sm + (dest_wave * avail_sms);
// remapping the first gpu_occupancy blocks
if ((block_idx < gpu_occupancy) && (num_blocks > gpu_occupancy))
{
block_idx = remapped_block_idx;
}
// Block-index is blockIdx.x for DP blocks
return block_idx;
}
  /// Obtains the linear SK-block index of the block that owns the given iteration
CUTLASS_DEVICE
int get_sk_block_idx(int iter) const
{
int region_idx;
int iter_in_region;
div_mod.sk_iters_per_region(region_idx, iter_in_region, iter);
int big_block_iters = (sk_big_blocks_per_region * sk_iters_per_normal_block) + sk_big_blocks_per_region; // number of iterations in the region's big blocks
    int normal_block_iters = iter_in_region - big_block_iters;                                                  // number of iterations in the region's normal blocks
int big_block_idx_in_region = div_mod.sk_iters_per_big_block.div(iter_in_region);
int normal_block_idx_in_region = sk_big_blocks_per_region + div_mod.sk_iters_per_normal_block.div(normal_block_iters);
int block_idx_in_region = (big_block_idx_in_region < sk_big_blocks_per_region) ?
big_block_idx_in_region :
normal_block_idx_in_region;
return (sk_blocks_per_region * region_idx) + block_idx_in_region;
}
  /// Obtains iteration extents for the given SK block index
CUTLASS_DEVICE
void get_iter_extents(
int sk_block_idx,
int &block_iter_begin,
int &block_iter_end) const
{
int region_idx;
int block_idx_in_region;
div_mod.sk_blocks_per_region(region_idx, block_idx_in_region, sk_block_idx);
block_iter_begin = (region_idx * sk_iters_per_region) + (block_idx_in_region * sk_iters_per_normal_block);
// Adjust extents for the first "num_big_blocks" blocks that get one extra iteration
int block_iters = sk_iters_per_normal_block;
if (block_idx_in_region < sk_big_blocks_per_region) {
// This is a +1 iteration block
block_iter_begin += block_idx_in_region;
block_iters++;
} else {
// This is a regular block
block_iter_begin += sk_big_blocks_per_region;
}
block_iter_end = block_iter_begin + block_iters;
}
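  // Worked example (editorial addition): with sk_iters_per_region = 10,
  // sk_blocks_per_region = 4, sk_iters_per_normal_block = 2 and
  // sk_big_blocks_per_region = 2, blocks 0..3 of region 0 receive the iteration
  // ranges [0,3), [3,6), [6,8), and [8,10) respectively: the two "big" blocks run
  // one extra iteration each, and together the four blocks cover all 10 iterations.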
  /// Obtains the linear threadblock index of the first block to work on the given tile
CUTLASS_DEVICE
int get_first_block_idx(int tile_idx, int block_idx) const
{
if (tile_idx >= sk_tiles) {
// DP tile
return block_idx;
}
int iter = tile_idx * iters_per_tile;
return get_sk_block_idx(iter);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 25,617 | C | 31.885751 | 186 | 0.611742 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting TensorOp instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h"
#include "cutlass/gemm/warp/mma_tensor_op_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA =
layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value>;
// Shared memory layout
using SmemLayoutB =
layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
sizeof_bits<ElementB>::value>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
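// Illustrative sketch (editorial addition, not part of the original header): a
// hypothetical instantiation that selects the specialization above. The
// GemmShape<8, 8, 4> instruction shape is what routes the choice to these Volta
// tensor-op cores.
//
//   using VoltaCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 32>,   // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,     // warp tile
//       cutlass::gemm::GemmShape<8, 8, 4>,        // Volta mma instruction shape
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassTensorOp,
//       2,                                        // stages
//       cutlass::arch::OpMultiplyAdd>;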
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementA>::value, Shape::kK>;
// Shared memory layout
using SmemLayoutB = layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
sizeof_bits<ElementB>::value>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
0,
IteratorThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: tensor op class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<8, 8, 4>, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassTensorOp, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<8, 8, 4>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassTensorOp;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
Shape::kK / WarpShape::kK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
/// Size of a threadblock-scoped access
static int const kAccessSizeInBits = 128;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
sizeof_bits<ElementA>::value>;
// Shared memory layout
using SmemLayoutB = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<ElementB>::value, Shape::kK>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<8, 4>,
kAccessSizeInBits / sizeof_bits<ElementA>::value
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 8>,
kAccessSizeInBits / sizeof_bits<ElementB>::value
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
1,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level tensor op
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using MmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
SmemLayoutA,
ElementB,
SmemLayoutB,
ElementC,
LayoutC,
Policy
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaTensorOp,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 19,257 | C | 27.196193 | 100 | 0.657787 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/threadblock/default_mma_core_simt.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting simt instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
namespace detail {
// Converts a WarpShape (the warp's whole tile of elements) into the number of threads
// along the M dimension. The goal is for each thread's tile of elements to be as square
// as possible for performance (a 4x4 tile is faster than a 2x8 tile).
template<typename WarpShape>
constexpr int simt_get_warp_threads_m() {
return (WarpShape::kM > WarpShape::kN) ? 8 : 4;
}
/// Computes padding in shared memory to perform efficient transpose without bank conflicts.
constexpr int simt_transpose_padding(int threads, int crosswise, int size_in_bits) {
return (size_in_bits >= 32 ?
threads / crosswise / (size_in_bits / 32) :
threads / crosswise * (32 / size_in_bits)
);
}
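// Illustrative checks of the two helpers above; the shapes and element widths are
// hypothetical and chosen only to show the arithmetic, not taken from any particular kernel.
static_assert(simt_get_warp_threads_m<cutlass::gemm::GemmShape<32, 64, 8>>() == 4,
              "Example: kM <= kN selects 4 threads along M (and therefore 8 along N)");
static_assert(simt_transpose_padding(32, 8, 32) == 4,
              "Example: 32 threads, crosswise 8, 32-bit elements -> 32 / 8 / (32 / 32) = 4 elements of skew");
static_assert(simt_transpose_padding(32, 32, 8) == 4,
              "Example: 32 threads, crosswise 32, 8-bit elements -> 32 / 32 * (32 / 8) = 4 elements of skew");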
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
  // The lane-level MMA shape is also capped by the thread tile size (via const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>; /// Used for partial specialization
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
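// For illustration only -- a hypothetical instantiation of this specialization using the
// classic SIMT SGEMM tiling (128x128x8 threadblock tile, 32x64x8 warp tile):
//
//   using SgemmCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 8>,   // threadblock tile
//       cutlass::gemm::GemmShape<32, 64, 8>,     // warp tile
//       cutlass::gemm::GemmShape<1, 1, 1>,       // instruction shape (scalar FMA)
//       float, cutlass::layout::ColumnMajor,     // A
//       float, cutlass::layout::RowMajor,        // B
//       float, cutlass::layout::RowMajor,        // C
//       cutlass::arch::OpClassSimt, 2, cutlass::arch::OpMultiplyAdd>;
//
// With these shapes, WarpCount is <4, 2, 1> (8 warps, 256 threads), each warp uses a 4x8
// thread arrangement, each thread owns an 8x8 tile of accumulators, and LaneMmaShape is
// <4, 4, 1>, so each K step issues four 4x4 FMA sub-tiles per thread.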
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::RowMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
SmemThreadMapA // was IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
    SmemThreadMapB // was IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
  // The lane-level MMA shape is also capped by the thread tile size (via const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
WarpCount::kK
>;
};
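// For illustration only: because A arrives row-major but is stored column-major in shared
// memory (and B the reverse), both operands are written through transposing thread maps,
// and the skew padding above avoids the resulting bank conflicts. For a hypothetical
// float GEMM with a threadblock K extent of 8:
//
//   kPaddingM = kPaddingN = simt_transpose_padding(32, 8, 32) = 4 elements,
//
// which is divisible by LaneM = LaneN = 4, satisfying the static_assert above.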
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::RowMajor, ElementB_, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
kElementsPerAccess
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
SmemThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static_assert(!(kPaddingM % LaneM),
"Padding must be divisible by Lane");
  // The lane-level MMA shape is also capped by the thread tile size (via const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::ColumnMajor, ElementB_, layout::ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
kElementsPerAccess
>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
static_assert(!(kPaddingN % LaneN),
"Padding must be divisible by Lane");
  // The lane-level MMA shape is also capped by the thread tile size (via const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
OperatorClass,
2,
Operator>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
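// For illustration only: the affine-layout specializations re-export the canonical-layout
// core unchanged, so the shared-memory side is identical; the affine strides only affect
// how tiles are fetched from global memory (handled outside this header). For hypothetical
// float operands:
//
//   using AffineCore = cutlass::gemm::threadblock::DefaultMmaCore<
//       cutlass::gemm::GemmShape<128, 128, 8>, cutlass::gemm::GemmShape<32, 64, 8>,
//       cutlass::gemm::GemmShape<1, 1, 1>,
//       float, cutlass::layout::AffineRank2ColumnMajor,
//       float, cutlass::layout::AffineRank2RowMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpClassSimt, 2, cutlass::arch::OpMultiplyAdd>;
//
//   static_assert(cutlass::platform::is_same<AffineCore::SmemLayoutA,
//                                            cutlass::layout::ColumnMajor>::value, "");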
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
OperatorClass,
2,
Operator>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::RowMajor,
ElementC,
LayoutC,
OperatorClass,
2,
Operator>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_,
layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2ColumnMajor,
ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::AffineRank2ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::AffineRank2ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
/// Default Operator
using Operator = Operator_;
using Base = DefaultMmaCore<Shape,
WarpShape,
InstructionShape,
ElementA,
layout::ColumnMajor,
ElementB,
layout::ColumnMajor,
ElementC,
LayoutC,
OperatorClass,
2,
Operator>;
//
// Shared memory layouts
//
using SmemLayoutA = typename Base::SmemLayoutA;
using SmemLayoutB = typename Base::SmemLayoutB;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = typename Base::IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = typename Base::SmemIteratorA;
/// Policy of iterator B
using IteratorThreadMapB = typename Base::IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = typename Base::SmemIteratorB;
//
// Warp-level matrix multiply operator
//
/// Policy used to define MmaPipelined
using MmaPolicy = typename Base::MmaPolicy;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class, for dp4a
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t,
layout::ColumnMajor, int8_t, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using LayoutA = layout::ColumnMajor;
using ElementB = int8_t;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorInterleaved<4>;
using SmemLayoutB = layout::RowMajorInterleaved<4>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(4, ThreadTileM);
static const int LaneN = cutlass::const_min(4, ThreadTileN);
  // The lane-level MMA shape is also capped by the thread tile size (via const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
4>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
PartitionsK /// Number of partitions along K dimension
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
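// For illustration only -- with a hypothetical 128x128x32 threadblock tile and 64x64x32
// warp tile of int8 data, each warp uses a 4x8 thread arrangement, each thread owns a
// 16x8 tile of int32 accumulators, and LaneMmaShape is <4, 4, 4>: every inner step
// performs a 4x4 block of dp4a instructions, each consuming four packed int8 values
// from A and from B.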
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: Row-major
/// B: Column-major
/// Operator: simt class, for dp4a
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t,
layout::RowMajor, int8_t, layout::ColumnMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorInterleaved<4>;
using SmemLayoutB = layout::RowMajorInterleaved<4>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
SmemThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(4, ThreadTileM);
static const int LaneN = cutlass::const_min(4, ThreadTileN);
  // The lane-level MMA shape is also capped by the thread tile size (via const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
4>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
PartitionsK /// Number of partitions along K dimension
>;
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>,
MatrixShape<0, kPaddingN>,
WarpCount::kK
>;
};
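// For illustration only: in this row-major A / column-major B case both operands are
// transposed on the way into shared memory, so both receive skew padding. For a
// hypothetical threadblock K extent of 32 with int8 data:
//
//   kPaddingM = kPaddingN = simt_transpose_padding(32, 32, 8) = 32 / 32 * (32 / 8) = 4.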
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: Row-major
/// B: Row-major
/// Operator: simt class, for dp4a
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t,
layout::RowMajor, int8_t, layout::RowMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using ElementB = int8_t;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorInterleaved<4>;
using SmemLayoutB = layout::RowMajorInterleaved<4>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kM>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Transpose the ThreadMap of iterator A
using SmemThreadMapA = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapA>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
SmemThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(4, ThreadTileM);
static const int LaneN = cutlass::const_min(4, ThreadTileN);
  // The lane-level MMA shape is also capped by the thread tile size (via const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
4>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
PartitionsK /// Number of partitions along K dimension
>;
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: Column-major
/// B: Column-major
/// Operator: simt class, for dp4a
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t,
layout::ColumnMajor, int8_t, layout::ColumnMajor, ElementC_,
LayoutC_, arch::OpClassSimt, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using LayoutA = layout::ColumnMajor;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajorInterleaved<4>;
using SmemLayoutB = layout::RowMajorInterleaved<4>;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap<
layout::PitchLinearShape<Shape::kK, Shape::kN>,
kThreads,
layout::PitchLinearShape<4, 4>
>;
  /// Transpose the ThreadMap of iterator B
using SmemThreadMapB = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapB>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(4, ThreadTileM);
static const int LaneN = cutlass::const_min(4, ThreadTileN);
  // The lane-level MMA shape is also capped by the thread tile size (via const_min above)
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
4>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
PartitionsK /// Number of partitions along K dimension
>;
static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, kPaddingN>,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 57,426 | C | 32.310325 | 114 | 0.654791 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/rank_2k.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined Rank2K kernel. Does not support batching or split-K.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/rank_2k_universal.h"
#include "cutlass/gemm/kernel/default_rank_2k_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementB_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
  /// Operation performed by the Rank2K update
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex elementwise transformation
ComplexTransform TransformB = ComplexTransform::kNone,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric>
class Rank2K {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static FillMode const kFillModeC = FillModeC;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
static BlasMode const kBlasMode = BlasMode_;
static int const kUpdateRank = 2;
// static asserts for rank 2k update kernel
static_assert(platform::is_same<LayoutA, LayoutB>::value,
"Rank 2K update operator support same layouts for operandA and B");
/// Define the kernel
using Rank2Kkernel = typename kernel::DefaultRank2KUniversal<
ElementA,
LayoutA,
kTransformA,
kAlignmentA,
ElementB,
LayoutB,
kTransformB,
kAlignmentB,
ElementC,
LayoutC,
kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator,
kBlasMode
>::Rank2Kkernel;
using Arguments = typename Rank2Kkernel::Arguments;
private:
/// Kernel parameters object
typename Rank2Kkernel::Params params_;
public:
  /// Constructs the Rank2K.
Rank2K() { }
  /// Determines whether the Rank2K can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
Status status = Rank2Kkernel::can_implement(args);
if (FillModeC != FillMode::kLower && FillModeC != FillMode::kUpper) {
return Status::kErrorInvalidProblem;
}
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
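    // For serial split-K (kSplitKSerial with batch_count > 1) the workspace holds one int-sized
    // semaphore per output tile so the slices can be reduced in order. Illustrative arithmetic
    // (hypothetical grid): an 8 x 4 tile grid needs 8 * 4 * sizeof(int) = 128 bytes; otherwise
    // no workspace is required.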
if (kSplitKSerial && args.batch_count > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
  /// Initializes Rank2K state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial) {
if (args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
}
int gemm_k_size = args.problem_size.k();
// Initialize the Params structure
params_ = typename Rank2Kkernel::Params{
args,
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
int smem_size = int(sizeof(typename Rank2Kkernel::SharedStorage));
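    // Kernels whose shared-memory tile exceeds the 48 KB static limit must opt in to larger
    // dynamic shared memory before launch.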
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<Rank2Kkernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
params_.update(args, workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(Rank2Kkernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename Rank2Kkernel::SharedStorage));
cutlass::Kernel<Rank2Kkernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
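/*
  Usage sketch (illustrative only): the element types, layouts and fill mode below are arbitrary
  choices, and make_rank2k_arguments() / device_workspace are hypothetical stand-ins for application
  code that fills the kernel's Arguments structure and allocates get_workspace_size() bytes on the
  device.

    using Rank2K = cutlass::gemm::device::Rank2K<
        double, cutlass::layout::ColumnMajor,   // ElementA, LayoutA
        double, cutlass::layout::ColumnMajor,   // ElementB, LayoutB
        double, cutlass::layout::ColumnMajor,   // ElementC, LayoutC
        cutlass::FillMode::kLower,              // triangle of C that is updated
        double>;                                // ElementAccumulator

    Rank2K rank2k_op;
    Rank2K::Arguments args = make_rank2k_arguments();

    if (Rank2K::can_implement(args) == cutlass::Status::kSuccess) {
      cutlass::Status status = rank2k_op(args, device_workspace);
    }
*/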
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output that exchanges the operands and inverts the fill mode.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by Rank2K update kernel
typename Operator_,
/// Complex elementwise transformation
ComplexTransform TransformA,
/// Complex elementwise transformation
ComplexTransform TransformB,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_
>
class Rank2K<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
FillModeC, ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
SplitKSerial, Operator_, TransformA, TransformB, BlasMode_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static FillMode const kFillModeC = FillModeC;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static BlasMode const kBlasMode = BlasMode_;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
static int const kUpdateRank = 2;
/// Define the kernel
using UnderlyingOperator = typename cutlass::gemm::device::Rank2K<
ElementB,
LayoutB,
ElementA,
LayoutA,
ElementC,
layout::RowMajor,
InvertFillMode<FillModeC>::mode,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kAlignmentB,
kAlignmentA,
kSplitKSerial,
Operator,
kTransformA,
kTransformB,
kBlasMode
>;
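  // Mapping note (symmetric case; the Hermitian case is analogous with conjugate transposes):
  // a column-major C is the row-major view of C^T, and
  //   C^T = (alpha * (A * B^T + B * A^T) + beta * C)^T = alpha * (B * A^T + A * B^T) + beta * C^T,
  // i.e. the transposed update is a rank-2k update with the roles of A and B exchanged and the
  // stored triangle flipped. UnderlyingOperator therefore swaps the operands (and their alignments)
  // and inverts the fill mode while producing a row-major result.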
/// Argument structure
using Arguments = typename UnderlyingOperator::Arguments;
using Rank2Kkernel = typename UnderlyingOperator::Rank2Kkernel;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the Rank2K.
Rank2K() { }
  /// Helper to construct a transposed equivalent for the underlying Rank2K operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem();
}
/// Determines whether the Rank2K can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes Rank2K state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 18,127 | C | 32.080292 | 102 | 0.674408 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/default_gemm_configuration.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Definitions for GEMM structures
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/mma.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
template <
typename OperatorClass,
typename ArchTag,
typename ElementA,
typename ElementB,
typename ElementC,
typename ElementAccumulator
>
struct DefaultGemmConfiguration;
////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename ElementA,
typename ElementB,
typename ElementC,
typename ElementAccumulator>
struct DefaultGemmConfiguration<
arch::OpClassSimt,
ArchTag,
ElementA,
ElementB,
ElementC,
ElementAccumulator> {
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
using ThreadblockShape = GemmShape<128, 128, 8>;
using WarpShape = GemmShape<32, 64, 8>;
using InstructionShape = GemmShape<1, 1, 1>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>;
using Operator = arch::OpMultiplyAdd;
};
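/*
  Illustrative use of the SIMT defaults above (the float element choices are arbitrary):

    using Config = cutlass::gemm::device::DefaultGemmConfiguration<
        cutlass::arch::OpClassSimt, cutlass::arch::Sm80, float, float, float, float>;

    static_assert(Config::kAlignmentA == 1 && Config::kAlignmentB == 1, "SIMT defaults use unit alignment");
    using TileShape = Config::ThreadblockShape;   // GemmShape<128, 128, 8>
    using MathOp    = Config::Operator;           // arch::OpMultiplyAdd

  Device-level operators such as Gemm, Rank2K and EllGemm pull their defaulted shape, stage count
  and epilogue template parameters from these traits when the caller does not specify them.
*/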
////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename ElementC>
struct DefaultGemmConfiguration<arch::OpClassSimt, ArchTag, int8_t, int8_t, ElementC, int32_t> {
static int const kAlignmentA = 4;
static int const kAlignmentB = 4;
using ThreadblockShape = GemmShape<128, 128, 32>;
using WarpShape = GemmShape<32, 64, 32>;
using InstructionShape = GemmShape<1, 1, 4>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC,
1,
int32_t,
float
>;
using Operator = arch::OpMultiplyAdd;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename ElementA,
typename ElementB,
typename ElementC,
typename ElementAccumulator>
struct DefaultGemmConfiguration<
arch::OpClassWmmaTensorOp,
ArchTag,
ElementA,
ElementB,
ElementC,
ElementAccumulator> {
static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value;
static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
ElementC,
128 / sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementAccumulator
>;
using Operator = arch::OpMultiplyAdd;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename ElementAccumulator>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm70,
ElementA,
ElementB,
ElementC,
ElementAccumulator> {
static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value;
static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value;
using ThreadblockShape = GemmShape<128, 256, 32>;
using WarpShape = GemmShape<64, 64, 32>;
using InstructionShape = GemmShape<8, 8, 4>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
ElementC,
128 / sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementAccumulator
>;
using Operator = arch::OpMultiplyAdd;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename ElementAccumulator>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
ElementA,
ElementB,
ElementC,
ElementAccumulator> {
static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value;
  static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value;
using ThreadblockShape = GemmShape<128, 256, 32>;
using WarpShape = GemmShape<64, 64, 32>;
using InstructionShape = GemmShape<16, 8, 8>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
ElementC,
128 / sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementAccumulator
>;
using Operator = typename platform::conditional<
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
arch::OpMultiplyAddSaturate, arch::OpMultiplyAdd>::type;
};
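// Note: the conditional above selects saturating multiply-add (arch::OpMultiplyAddSaturate) whenever
// operand A is a narrow integer type (int8_t, uint8_t, int4b_t, uint4b_t); all other element types
// fall back to arch::OpMultiplyAdd.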
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
int8_t,
int8_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<8, 8, 16>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
int8_t,
uint8_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<8, 8, 16>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
uint8_t,
int8_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<8, 8, 16>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
uint8_t,
uint8_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<8, 8, 16>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
int4b_t,
int4b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 128>;
using WarpShape = GemmShape<64, 64, 128>;
using InstructionShape = GemmShape<8, 8, 32>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
int4b_t,
uint4b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 128>;
using WarpShape = GemmShape<64, 64, 128>;
using InstructionShape = GemmShape<8, 8, 32>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
uint4b_t,
int4b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 128>;
using WarpShape = GemmShape<64, 64, 128>;
using InstructionShape = GemmShape<8, 8, 32>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
uint4b_t,
uint4b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 128>;
using WarpShape = GemmShape<64, 64, 128>;
using InstructionShape = GemmShape<8, 8, 32>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm75,
uint1b_t,
uint1b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint1b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint1b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 512>;
using WarpShape = GemmShape<64, 64, 512>;
using InstructionShape = GemmShape<8, 8, 128>;
static int const kStages = 2;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpXorPopc;
};
////////////////////////////////////////////////////////////////////////////////
template <typename ElementA, typename ElementB, typename ElementC,
typename ElementAccumulator>
struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm80, ElementA,
ElementB, ElementC, ElementAccumulator> {
static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value;
  static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<16, 8, 16>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator,
ElementAccumulator>;
using Operator = typename platform::conditional<
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
arch::OpMultiplyAddSaturate, arch::OpMultiplyAdd>::type;
};
////////////////////////////////////////////////////////////////////////////////
template <typename ElementC,
typename ElementAccumulator>
struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm80, double,
double, ElementC, ElementAccumulator> {
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
  using ThreadblockShape = GemmShape<128, 128, 16>;
  using WarpShape = GemmShape<32, 64, 16>;
  using InstructionShape = GemmShape<8, 8, 4>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator,
ElementAccumulator>;
using Operator = arch::OpMultiplyAdd;
};
template <>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
complex<double>,
complex<double>,
complex<double>,
complex<double>
> {
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
using ThreadblockShape = GemmShape<64, 64, 16>;
using WarpShape = GemmShape<32, 32, 16>;
using InstructionShape = GemmShape<8, 8, 4>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
complex<double>, 1, complex<double>,
complex<double>>;
using Operator = arch::OpMultiplyAddComplex;
};
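// Note: complex<double> is 128 bits wide, so a 128-bit access granularity corresponds to an
// alignment of one element and an epilogue vector length of one.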
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
int8_t,
int8_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<16, 8, 32>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
int8_t,
uint8_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<16, 8, 32>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
uint8_t,
int8_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<16, 8, 32>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
uint8_t,
uint8_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value;
using ThreadblockShape = GemmShape<128, 256, 64>;
using WarpShape = GemmShape<64, 64, 64>;
using InstructionShape = GemmShape<16, 8, 32>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
int4b_t,
int4b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 128>;
using WarpShape = GemmShape<64, 64, 128>;
using InstructionShape = GemmShape<16, 8, 64>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
int4b_t,
uint4b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 128>;
using WarpShape = GemmShape<64, 64, 128>;
using InstructionShape = GemmShape<16, 8, 64>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
uint4b_t,
int4b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 128>;
using WarpShape = GemmShape<64, 64, 128>;
using InstructionShape = GemmShape<16, 8, 64>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
uint4b_t,
uint4b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 128>;
using WarpShape = GemmShape<64, 64, 128>;
using InstructionShape = GemmShape<16, 8, 64>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAddSaturate;
};
////////////////////////////////////////////////////////////////////////////////
template <
typename ElementC>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm80,
uint1b_t,
uint1b_t,
ElementC,
int32_t> {
static int const kAlignmentA = 128 / sizeof_bits<uint1b_t>::value;
static int const kAlignmentB = 128 / sizeof_bits<uint1b_t>::value;
using ThreadblockShape = GemmShape<128, 256, 512>;
using WarpShape = GemmShape<64, 64, 512>;
using InstructionShape = GemmShape<16, 8, 256>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp<
ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>;
using Operator = arch::OpMultiplyAdd;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
template <typename ElementC,
typename ElementAccumulator>
struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm90, double,
double, ElementC, ElementAccumulator> {
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
  using ThreadblockShape = GemmShape<128, 128, 16>;
  using WarpShape = GemmShape<32, 64, 16>;
using InstructionShape = GemmShape<16, 8, 4>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator,
ElementAccumulator>;
using Operator = arch::OpMultiplyAdd;
};
template <>
struct DefaultGemmConfiguration<
arch::OpClassTensorOp,
arch::Sm90,
complex<double>,
complex<double>,
complex<double>,
complex<double>
> {
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
using ThreadblockShape = GemmShape<64, 64, 16>;
using WarpShape = GemmShape<32, 32, 16>;
using InstructionShape = GemmShape<16, 8, 4>;
static int const kStages = 3;
using EpilogueOutputOp = epilogue::thread::LinearCombination<
complex<double>, 1, complex<double>,
complex<double>>;
using Operator = arch::OpMultiplyAddComplex;
};
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 24,413 | C | 28.809524 | 100 | 0.63159 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/ell_gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a Block-Ell sparse gemm kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/ell_gemm.h"
#include "cutlass/gemm/kernel/default_ell_gemm.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Blocked-Ell sparse gemm device-level operator. This is an interface to efficient CUTLASS
Blocked-Ell kernels that may be invoked from host code.
The contributions of this class are:
1. At compile time, it maps data types and high-level structural parameters onto
specific CUTLASS components.
2. At runtime, it maps logical arguments to Blocked-Ell problems to kernel parameters.
3. At runtime, it launches kernels on the device.
Example of a CUTLASS EllGemm operator is as follows:
//
// Instantiate the CUTLASS EllGemm operator.
//
cutlass::gemm::device::EllGemm<
cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::half_t,
cutlass::layout::ColumnMajor,
float,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<128, 128, 32>,
cutlass::gemm::GemmShape<64, 64, 32>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<
cutlass::half_t, 128 / cutlass::sizeof_bits<cutlass::half_t>::value,
float, float>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
4, // Stages
128 / cutlass::sizeof_bits<cutlass::half_t>::value, // Alignment A
128 / cutlass::sizeof_bits<cutlass::half_t>::value // Alignment B
> ellgemm_op;
//
// Launch the EllGemm operation on the device
//
Description of parameters and tensors used to represent the Blocked-Ellpack (ELL) format:
a_rows - Rows in the sparse matrix.
      a_cols - Columns in the sparse matrix.
BlockedEllA - Packed matrix (ellValue matrix) that stores non-zero values in
consecutive blocks, whose size is (a_rows * a_ell_num_columns)
ell_idx - Blocked-ELL Column indices (ellColInd) matrix, whose size is
(a_rows / a_ell_blocksize) * (a_ell_num_columns / a_ell_blocksize)
a_ell_blocksize - Size of the ELL-Blocks.
a_ell_num_columns - Number of columns in the Blocked-Ellpack format (ellValue columns)
B - Input dense matrix whose size is (a_cols * n)
C/D - Output dense matrix whose size is (a_rows * n)
cutlass::Status status = ellgemm_op({
{a_rows, n, a_cols}, // GemmCoord problem_size
{BlockedEllA, lda}, // TensorRef<cutlass::half_t, layout::RowMajor> ref_BlockedEllA
{B, ldb}, // TensorRef<cutlass::half_t, layout::ColumnMajor> ref_B,
        {C, ldc},              // TensorRef<cutlass::half_t, layout::ColumnMajor> ref_C,
        {D, ldd},              // TensorRef<cutlass::half_t, layout::ColumnMajor> ref_D,
ell_idx, // Blocked-ELL Column indices or ellColInd matrix (const int*)
a_ell_num_columns, // Columns in the Blocked-Ellpack (ellValue) matrix (int)
a_ell_blocksize, // Size of the ELL-Blocks (int)
a_ell_base, // Base index of ellColInd (int) - Zero or One
{alpha, beta} // EpilogueOutputOp::Params epilogue_op_params
});
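    For intuition, a small hypothetical layout: a 4x4 sparse matrix stored with a_ell_blocksize = 2
    and one non-zero 2x2 block per block-row has a_ell_num_columns = 2, so BlockedEllA holds
    a_rows * a_ell_num_columns = 8 values and ell_idx holds
    (a_rows / a_ell_blocksize) * (a_ell_num_columns / a_ell_blocksize) = 2 entries, each naming the
    block-column of the stored block in its block-row.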
A simplified view of the template is listed below.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
      /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
      int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// Supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
class EllGemm;
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Sparse matrix is A or not
bool IsASparse = true
>
class EllGemm {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static bool const kIsASparse = IsASparse;
/// Define the kernel
using GemmKernel = typename kernel::DefaultEllGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator,
kIsASparse
>::GemmKernel;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
const int* ell_idx;
int ell_ncol;
int ell_blocksize;
int ell_base_idx;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
const int* ell_idx_,
int ell_ncol_,
int ell_blocksize_,
int ell_base_idx_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ell_idx(ell_idx_),
ell_ncol(ell_ncol_),
ell_blocksize(ell_blocksize_),
ell_base_idx(ell_base_idx_),
epilogue(epilogue_),
split_k_slices(split_k_slices) {
}
};
private:
/// Kernel parameters object
typename GemmKernel::Params params_;
public:
/// Constructs the GEMM.
EllGemm() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
Status status = GemmKernel::can_implement(
args.problem_size,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{args.ell_blocksize,
ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
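    // The M extent is first tiled by whole ELL blocks and each block is then split into
    // ThreadblockShape::kM tiles below, so threadblock tiles never straddle an ELL block boundary.
    // Illustrative arithmetic (hypothetical sizes): a_rows = 1024 with ell_blocksize = 256 gives
    // 4 block-rows, and kM = 128 expands each into 2 tiles, i.e. 8 tiles along M in total.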
tiled_shape.m() *= (args.ell_blocksize + ThreadblockShape::kM - 1 ) / ThreadblockShape::kM;
if (kSplitKSerial && args.split_k_slices > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
Status set(Arguments const &args, cutlass::gemm::GemmCoord const &grid_shape, void *workspace){
// Initialize the Params structure
params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ell_idx,
args.ell_ncol,
args.ell_blocksize,
args.ell_base_idx,
args.epilogue,
static_cast<int *>(workspace)
};
return Status::kSuccess;
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{args.ell_blocksize, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
grid_shape.m() *= (args.ell_blocksize + ThreadblockShape::kM - 1 ) / ThreadblockShape::kM;
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
return set(args, grid_shape, workspace);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
params_.ref_A.reset(args.ref_A.non_const_ref().data());
params_.ref_B.reset(args.ref_B.non_const_ref().data());
params_.ref_C.reset(args.ref_C.non_const_ref().data());
params_.ref_D.reset(args.ref_D.data());
params_.output_op = args.epilogue;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
cudaError_t result;
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output exchanges problem size and operands.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K as a serial reduction
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator_,
/// Sparse matrix is A or not
bool IsASparse>
class EllGemm<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
SplitKSerial, Operator_, IsASparse> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static bool const kSplitKSerial = SplitKSerial;
static bool const kIsASparse = false;
using UnderlyingOperator = EllGemm<
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
kAlignmentB,
kAlignmentA,
SplitKSerial,
Operator,
kIsASparse
>;
using UnderlyingArguments = typename UnderlyingOperator::Arguments;
using GemmKernel = typename UnderlyingOperator::GemmKernel;
static int const kAlignmentC = UnderlyingOperator::kAlignmentC;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
const int* ell_idx;
int ell_ncol;
int ell_blocksize;
int ell_base_idx;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
const int* ell_idx_,
int ell_ncol_,
int ell_blocksize_,
int ell_base_idx_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ell_idx(ell_idx_),
ell_ncol(ell_ncol_),
ell_blocksize(ell_blocksize_),
ell_base_idx(ell_base_idx_),
epilogue(epilogue_),
split_k_slices(split_k_slices) { }
};
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
EllGemm() { }
  /// Helper to construct a transposed equivalent for the underlying GEMM operator
static UnderlyingArguments to_underlying_arguments(Arguments const &args) {
return UnderlyingArguments(
{args.problem_size.n(), args.problem_size.m(), args.problem_size.k()},
{args.ref_B.data(), args.ref_B.stride(0)},
{args.ref_A.data(), args.ref_A.stride(0)},
{args.ref_C.data(), args.ref_C.stride(0)},
{args.ref_D.data(), args.ref_D.stride(0)},
args.ell_idx,
args.ell_ncol,
args.ell_blocksize,
args.ell_base_idx,
args.epilogue,
args.split_k_slices
);
}
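  // Note: a column-major D is the row-major view of D^T = (A * B)^T = B^T * A^T, so the problem's
  // m and n extents are swapped and ref_B / ref_A exchange roles above, while the ELL metadata still
  // describes the sparse operand (kIsASparse is false in the underlying operator because the sparse
  // matrix becomes its B operand).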
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, args.ell_blocksize, ThreadblockShape::kK},
args.split_k_slices);
tiled_shape.n() *= (args.ell_blocksize + ThreadblockShape::kN - 1 ) / ThreadblockShape::kN;
if (kSplitKSerial && args.split_k_slices > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
Status set(Arguments const &args, cutlass::gemm::GemmCoord const &grid_shape, void *workspace){
// Initialize the Params structure
return underlying_operator_.set(to_underlying_arguments(args), grid_shape, workspace);
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
{args.problem_size.n(), args.problem_size.m(), args.problem_size.k()},
{ThreadblockShape::kM, args.ell_blocksize, ThreadblockShape::kK},
args.split_k_slices);
grid_shape.n() *= (args.ell_blocksize + ThreadblockShape::kN - 1 ) / ThreadblockShape::kN;
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
// Initialize the Params structure
set(args, grid_shape, workspace);
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 27,616 | C | 31.528857 | 102 | 0.644373 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/gemm_sparse.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined sparse GEMM kernel. Batching is not supported; split-K is available only as a serial reduction (SplitKSerial).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_sparse.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Gemm device-level operator. This is an interface to efficient CUTLASS GEMM kernels that may
be invoked from host code.
The contributions of this class are:
1. At compile time, it maps data types and high-level structural parameters onto
specific CUTLASS components.
2. At runtime, it maps logical arguments to GEMM problems to kernel parameters.
3. At runtime, it launches kernels on the device.
The intent is to provide a convenient mechanism for interacting with most plausible GEMM
configurations for each supported architecture. Consequently, not all parameters are exposed
to the top-level interface. Rather, sensible defaults at each level of the CUTLASS hierarchy
are selected to tradeoff simplicity of the interface with flexibility. We expect
most configurations to be specified at this level. Applications with more exotic requirements
may construct their kernels of interest using CUTLASS components at the threadblock, warp,
and thread levels of abstraction.
CUTLASS exposes computations using the functor design pattern in which objects compose some
internal state with an overloaded function call operator. This enables decoupling of
initialization from execution, possibly reducing overhead during steady state phases of
application execution.
CUTLASS device-level operators expose an Arguments structure encompassing each logical
input to the computation. This is distinct from the kernel-level Params structure pattern
which contains application-specific precomputed state needed by the device code.
Example of a CUTLASS GEMM operator implementing the functionality of cuBLAS's SGEMM NN
is as follows:
//
// Instantiate the CUTLASS GEMM operator.
//
cutlass::gemm::device::Gemm<
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor
> gemm_op;
//
// Launch the GEMM operation on the device
//
cutlass::Status status = gemm_op({
{m, n, k}, // GemmCoord problem_size,
{A, lda}, // TensorRef<float, layout::ColumnMajor> ref_A,
{B, ldb}, // TensorRef<float, layout::ColumnMajor> ref_B,
{C, ldc}, // TensorRef<float, layout::ColumnMajor> ref_C,
{D, ldd}, // TensorRef<float, layout::ColumnMajor> ref_D,
{alpha, beta} // EpilogueOutputOp::Params epilogue_op_params
});
A simplified view of the template is listed below.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages
>
class Gemm;
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class SparseGemm {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using MathOperator = Operator;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Define the kernel
using GemmKernel = typename kernel::DefaultSparseGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator
>::GemmKernel;
using ElementE = typename GemmKernel::ElementE;
using LayoutE = typename GemmKernel::LayoutE;
static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value;
static int const kSparse = GemmKernel::kSparse;
static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits;
static int const kElementsPerElementE = GemmKernel::kElementsPerElementE;
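  // Added commentary: ElementE/LayoutE describe the reordered sparsity-metadata tensor
  // consumed alongside the compressed A operand. kSparse is the ratio of logical K to
  // stored K for operand A (e.g. 2 for 2:4 structured sparsity), and kElementsPerElementE
  // is the number of logical A elements whose metadata is packed into one ElementE word.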
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
TensorRef<ElementE const, LayoutE> ref_E;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
TensorRef<ElementE, LayoutE> ref_E_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ref_E(ref_E_),
epilogue(epilogue_),
split_k_slices(split_k_slices) {
}
};
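  // Illustrative usage sketch (added commentary; the element types, layouts, and strides
  // below are assumptions and not part of the original header). The argument order
  // mirrors the constructor above:
  //
  //   using Gemm = cutlass::gemm::device::SparseGemm<
  //       cutlass::half_t, cutlass::layout::RowMajor,
  //       cutlass::half_t, cutlass::layout::ColumnMajor,
  //       cutlass::half_t, cutlass::layout::RowMajor,
  //       float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80>;
  //
  //   Gemm gemm_op;
  //   Gemm::Arguments args(
  //       {m, n, k},       // logical problem size
  //       {ptr_A, lda},    // compressed A: k / kSparse columns are stored
  //       {ptr_B, ldb},
  //       {ptr_C, ldc},
  //       {ptr_D, ldd},
  //       {ptr_E, lde},    // reordered sparsity metadata
  //       {alpha, beta});
  //   cutlass::Status status = gemm_op(args);   // initialize() followed by run()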
private:
/// Kernel parameters object
typename GemmKernel::Params params_;
public:
/// Constructs the GEMM.
SparseGemm() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
Status status = GemmKernel::can_implement(
args.problem_size,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref()
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial && args.split_k_slices > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
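  // Usage note (added commentary): when kSplitKSerial is enabled and
  // args.split_k_slices > 1, the caller is expected to allocate this many bytes of
  // device memory and pass the pointer to initialize() / operator(). A sketch, assuming
  // the cutlass/util device_memory helper is available and the Gemm alias from the
  // sketch above:
  //
  //   size_t bytes = Gemm::get_workspace_size(args);
  //   cutlass::device_memory::allocation<uint8_t> workspace(bytes);
  //   cutlass::Status status = gemm_op(args, workspace.get(), stream);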
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
// Initialize the Params structure
params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref(),
args.epilogue,
static_cast<int *>(workspace)
};
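    // Added commentary: kernels whose SharedStorage exceeds the 48 KB static limit must
    // opt in to larger dynamic shared memory with cudaFuncSetAttribute before launch;
    // without this, the launch in run() would fail even on devices that support the
    // larger shared-memory carveout.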
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
params_.ref_A.reset(args.ref_A.non_const_ref().data());
params_.ref_B.reset(args.ref_B.non_const_ref().data());
params_.ref_C.reset(args.ref_C.non_const_ref().data());
params_.ref_D.reset(args.ref_D.data());
params_.ref_E.reset(args.ref_E.non_const_ref().data());
params_.output_op = args.epilogue;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 17,329 | C | 32.650485 | 102 | 0.659242 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/trmm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a TRMM kernel. Batching is not supported; split-K is available only as a serial reduction (SplitKSerial).
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/gemm/kernel/default_trmm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Trmm device-level operator. This is an interface to efficient CUTLASS TRMM kernels that may
be invoked from host code.
The contributions of this class are:
1. At compile time, it maps data types and high-level structural parameters onto
specific CUTLASS components.
2. At runtime, it maps logical arguments to TRMM problems to kernel parameters.
3. At runtime, it launches kernels on the device.
The intent is to provide a convenient mechanism for interacting with most plausible TRMM
configurations for each supported architecture. Consequently, not all parameters are exposed
to the top-level interface. Rather, sensible defaults at each level of the CUTLASS hierarchy
are selected to tradeoff simplicity of the interface with flexibility. We expect
most configurations to be specified at this level. Applications with more exotic requirements
may construct their kernels of interest using CUTLASS components at the threadblock, warp,
and thread levels of abstraction.
CUTLASS exposes computations using the functor design pattern in which objects compose some
internal state with an overloaded function call operator. This enables decoupling of
initialization from execution, possibly reducing overhead during steady state phases of
application execution.
CUTLASS device-level operators expose an Arguments structure encompassing each logical
input to the computation. This is distinct from the kernel-level Params structure pattern
which contains application-specific precomputed state needed by the device code.
Example of a CUTLASS TRMM operator implementing the functionality of cuBLAS's STRMM NN
is as follows:
//
// Instantiate the CUTLASS TRMM operator.
//
cutlass::gemm::device::Trmm<
float,
cutlass::layout::ColumnMajor,
cutlass::SideMode::kLeft,
cutlass::FillMode::kLower,
cutlass::DiagType::kNonUnit,
float,
cutlass::layout::ColumnMajor,
float,
      cutlass::layout::ColumnMajor
> trmm_op;
//
// Launch the TRMM operation on the device
//
cutlass::Status status = trmm_op({
cutlass::gemm::GemmUniversalMode, // Trmm Problem Mode
{m, n, m/n}, // GemmCoord problem_size (k is based on left- or right-side mode)
batch_count,
{alpha}, // EpilogueOutputOp::Params epilogue_op_params
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int lda,
int ldb,
int ldc
});
A simplified view of the template is listed below.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// DiagType for A (kNonUnit or kUnit)
DiagType DiagTypeA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA
>
class Trmm;
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A
SideMode SideModeA,
/// Fill Mode for A
FillMode FillModeA,
/// DiagType for A
DiagType DiagTypeA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = epilogue::thread::LinearCombination<
ElementC_,
128 / sizeof_bits<ElementC_>::value,
ElementAccumulator_,
ElementAccumulator_,
epilogue::thread::ScaleType::OnlyAlphaScaling
>,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by TRMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA = ComplexTransform::kNone>
class Trmm {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementAKernel = typename platform::conditional<(SideModeA == SideMode::kRight), ElementB_, ElementA_>::type;
using LayoutAKernel = typename platform::conditional<(SideModeA == SideMode::kRight), LayoutB_, LayoutA_>::type;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementBKernel = typename platform::conditional<(SideModeA == SideMode::kRight), ElementA_, ElementB_>::type;
using LayoutBKernel = typename platform::conditional<(SideModeA == SideMode::kRight), LayoutA_, LayoutB_>::type;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static SideMode const kSideMode = SideModeA;
static FillMode const kFillMode = FillModeA;
static DiagType const kDiagType = DiagTypeA;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentAKernel = (SideModeA == SideMode::kRight) ? AlignmentB : AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentBKernel = (SideModeA == SideMode::kRight) ? AlignmentA : AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
  // Complex transforms don't apply to B
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static ComplexTransform const kTransformAKernel = (SideModeA == SideMode::kRight) ?
ComplexTransform::kNone : TransformA;
static ComplexTransform const kTransformBKernel = (SideModeA == SideMode::kRight) ?
TransformA : ComplexTransform::kNone;
/// Define the kernel
using TrmmKernel = typename kernel::DefaultTrmmUniversal<
ElementAKernel,
LayoutAKernel,
kTransformAKernel,
kAlignmentAKernel,
ElementBKernel,
LayoutBKernel,
kTransformBKernel,
kAlignmentBKernel,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator
>::TrmmKernel;
using Arguments = typename TrmmKernel::Arguments;
private:
/// Kernel parameters object
typename TrmmKernel::Params params_;
public:
/// Constructs the TRMM.
Trmm() { }
/// Determines whether the TRMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
Status status = TrmmKernel::can_implement(args);
if (SideModeA == SideMode::kInvalid) {
return Status::kErrorInvalidProblem;
}
if (FillModeA == FillMode::kInvalid) {
return Status::kErrorInvalidProblem;
}
if (DiagTypeA == DiagType::kInvalid) {
return Status::kErrorInvalidProblem;
}
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial && args.batch_count > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
/// Initializes TRMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial) {
if (args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
}
int gemm_k_size = args.problem_size.k();
    // Swap the A and B arguments if A is on the right side (the problem size does not need to change here).
if (kSideMode == SideMode::kRight) {
// Initialize the Params structure
params_ = typename TrmmKernel::Params{
args.swapped_matrices(),
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
return Status::kSuccess;
}
// Initialize the Params structure
params_ = typename TrmmKernel::Params{
args,
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
params_.update(args, workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(TrmmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename TrmmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<TrmmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
cutlass::Kernel<TrmmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
    Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
/********************************************************************************************************
TRMM has 4 combinations based on Layouts {RowMajor, ColumnMajor} x Side mode {LeftSide, RightSide}
In templates and arguments to cutlass kernel, `matrix A` is always triangular, and `matrix B` is rectangular.
(adhering to the cuBLAS convention)
 For the mainloop and TRMM kernel, `A` and `B` point to the left-side and right-side matrices, respectively.
 Thus, for LeftSide mode `A` and `B` point to `matrix A` and `matrix B`, respectively, while for
 RightSide mode `A` and `B` point to `matrix B` and `matrix A`, respectively.
Additionally, CUTLASS GEMM epilogue is always RowMajor, and ColumnMajor output is achieved by
transposing the GEMM problem. Thus, ColumnMajor output layout for TRMM requires:
- Transposing `matrix A` and `matrix B` layouts
- Swapping problem size m and n values
- Swapping LeftSide and RightSide mode
RowMajor output: D = matrix A x matrix B
ColumnMajor output: D = matrix A x matrix B -> Transpose (D) = Transpose(matrix B) x Transpose(matrix A)
{RowMajor, ColumnMajor} x Side Mode {LeftSide, RightSide} 4 cases:
1. LeftSide mode and RowMajor output (default template)
2. LeftSide mode and ColumnMajor output
3. RightSide mode and RowMajor output
4. RightSide mode and ColumnMajor output
Mapping ColumnMajor output layout cases 2 and 4 to RowMajor efficient epilogue implementation:
Case 2 -> Case 3:
D_col = matrix A x matrix B (LeftSide mode)
=> Transpose(D_col) = Transpose(matrix B) x Transpose(matrix A) (RightSide mode)
 swap the pointers for `A` and `B`, then call the GEMM mainloop with the RowMajor efficient epilogue
Case 4 -> Case 1:
D_col = matrix B x matrix A (RightSide mode)
=> Transpose(D_col) = Transpose(matrix A) x Transpose(matrix B) (LeftSide mode)
 call the GEMM mainloop with the RowMajor efficient epilogue
********************************************************************************************************/
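/* Added commentary -- a concrete instance of the Case 2 -> Case 3 mapping (the modes and
   sizes are illustrative assumptions): a LeftSide, FillMode::kLower TRMM with ColumnMajor
   output and problem size (m, n) is executed by the specialization below as a RightSide,
   FillMode::kUpper TRMM with RowMajor output over the transposed layouts. The problem-size
   swap to (n, m) is performed by to_underlying_arguments() via
   Arguments::transposed_problem_size(), and the A/B pointer exchange for the resulting
   RightSide problem is handled in the underlying operator's initialize() via
   Arguments::swapped_matrices(). */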
/// Partial specialization for column-major output; exchanges the problem size and operands.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A
SideMode SideModeA,
/// Fill Mode for A
FillMode FillModeA,
/// DiagType for A
DiagType DiagTypeA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K as a serial reduction
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA>
class Trmm<ElementA_, LayoutA_, SideModeA, FillModeA, DiagTypeA,
ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, SplitKSerial,
Operator_, TransformA> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static SideMode const kSideMode = SideModeA;
static FillMode const kFillMode = FillModeA;
static DiagType const kDiagType = DiagTypeA;
// Changing SideMode as we change the layout
static SideMode const kSideModeT = (SideModeA == SideMode::kLeft) ?
SideMode::kRight : SideMode::kLeft;
// Changing FillMode as we change the layout
static FillMode const kFillModeT = (FillModeA == FillMode::kLower) ?
FillMode::kUpper : FillMode::kLower;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static ComplexTransform const kTransformA = TransformA;
  // Complex transforms don't apply to B
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static bool const kSplitKSerial = SplitKSerial;
using UnderlyingOperator = Trmm<
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
kSideModeT,
kFillModeT,
kDiagType,
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kAlignmentA,
kAlignmentB,
kSplitKSerial,
Operator,
TransformA
>;
using Arguments = typename UnderlyingOperator::Arguments;
using TrmmKernel = typename UnderlyingOperator::TrmmKernel;
static int const kAlignmentC = UnderlyingOperator::kAlignmentC;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the TRMM.
Trmm() { }
  /// Helper to construct a transposed equivalent for the underlying TRMM operator, which is functionally identical
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem_size();
}
/// Determines whether the TRMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Initializes TRMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 26,464 | C | 33.868248 | 117 | 0.671781 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/gemm_splitk_parallel.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for GEMM performing a reduction over K partitions in parallel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/default_gemm_splitk_parallel.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
/*!
Gemm device-level operator performing parallel reduction over the K partition.
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Epilogue output operator
typename ConvertScaledOp_ = cutlass::epilogue::thread::Convert<
ElementAccumulator_,
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementAccumulator_,
ElementAccumulator_>::EpilogueOutputOp::kCount,
ElementAccumulator_>,
/// Reduction operator
typename ReductionOp_ = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator_, typename EpilogueOutputOp_::ElementAccumulator,
EpilogueOutputOp_::kCount>,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
threadblock::GemmSplitKHorizontalThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int kAlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int kAlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class GemmSplitKParallel {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ConvertScaledOp = ConvertScaledOp_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ReductionOp = ReductionOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
/// GEMM kernel
using GemmKernel = typename kernel::DefaultGemmSplitKParallel<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
ConvertScaledOp,
ThreadblockSwizzle,
kStages,
Operator
>::GemmKernel;
/// Reduction kernel
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>;
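  // Added commentary: a single run() launches two kernels -- the GEMM kernel writes
  // per-partition partial accumulations (in ElementAccumulator precision) to the
  // user-provided workspace, and ReduceSplitK then reduces across the K partitions and
  // applies the epilogue to produce ref_D.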
//
//
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
typename ConvertScaledOp::Params convert;
typename ReductionOp::Params reduction;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1,
typename ConvertScaledOp::Params convert_ =
typename ConvertScaledOp::Params(),
typename ReductionOp::Params reduction_ =
typename ReductionOp::Params()
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
epilogue(epilogue_),
split_k_slices(split_k_slices),
convert(convert_),
reduction(reduction_) { }
};
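  // Illustrative usage sketch (added commentary; the element types, layouts, and the
  // device_memory helper are assumptions, not part of the original header). Parallel
  // split-K always requires a workspace sized by get_workspace_size():
  //
  //   using Gemm = cutlass::gemm::device::GemmSplitKParallel<
  //       float, cutlass::layout::ColumnMajor,
  //       float, cutlass::layout::ColumnMajor,
  //       float, cutlass::layout::ColumnMajor>;
  //
  //   Gemm gemm_op;
  //   Gemm::Arguments args({m, n, k}, {A, lda}, {B, ldb}, {C, ldc}, {D, ldd},
  //                        {alpha, beta}, /*split_k_slices=*/8);
  //
  //   size_t bytes = Gemm::get_workspace_size(args);
  //   cutlass::device_memory::allocation<uint8_t> workspace(bytes);
  //   cutlass::Status status = gemm_op(args, workspace.get(), stream);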
private:
/// Kernel parameters object
typename GemmKernel::Params gemm_params_;
/// Reduction kernel parameters object
typename ReductionKernel::Params reduction_params_;
public:
/// Constructs the GEMM.
GemmSplitKParallel() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
// TODO
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
return sizeof(ElementAccumulator_) * size_t(args.problem_size.m()) * size_t(args.problem_size.n()) * grid_shape.k();
}
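  // Worked illustration (the values are assumptions): for m = n = 4096 with float
  // accumulators and split_k_slices = 8 (so grid_shape.k() is 8), the workspace is
  // 4 * 4096 * 4096 * 8 bytes = 512 MiB -- parallel split-K workspaces can be large.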
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
// Define a reference to the workspace - this is an aligned region in device memory.
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
TensorRef<ElementAccumulator_, layout::RowMajor> ref_workspace(
static_cast<ElementAccumulator_ *>(workspace),
args.problem_size.n());
int64_t partition_stride = int64_t(args.problem_size.m()) * int64_t(args.problem_size.n());
// Initialize the Params structure
gemm_params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
ref_workspace,
args.convert,
partition_stride
};
reduction_params_ = typename ReductionKernel::Params(
args.problem_size.mn(),
grid_shape.k(),
partition_stride,
ref_workspace,
args.ref_D,
args.ref_C.non_const_ref(),
args.epilogue
);
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
gemm_params_.ref_A.reset(args.ref_A.data());
gemm_params_.ref_B.reset(args.ref_B.data());
gemm_params_.ref_D.reset(workspace);
reduction_params_.ref_D.reset(args.ref_D.data());
reduction_params_.ref_C.reset(args.ref_C.data());
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
//
// Launch GEMM kernel
//
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(gemm_params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
cudaError_t result;
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(
Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(gemm_params_);
result = cudaGetLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
//
// Launch reduction kernel
//
block = ReductionKernel::block_shape();
grid = ReductionKernel::grid_shape(gemm_params_.problem_size.mn());
Kernel<ReductionKernel><<< grid, block, 0, stream >>>(reduction_params_);
result = cudaGetLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Epilogue output operator
typename ConvertScaledOp_,
/// Reduction operator
typename ReductionOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages, int kAlignmentA, int kAlignmentB,
/// Operation performed by GEMM
typename Operator_>
class GemmSplitKParallel<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, ElementAccumulator_,
OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ConvertScaledOp_, ReductionOp_, ThreadblockSwizzle_,
Stages, kAlignmentA, kAlignmentB, Operator_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ConvertScaledOp = ConvertScaledOp_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ReductionOp = ReductionOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
using UnderlyingOperator = GemmSplitKParallel<
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ConvertScaledOp,
ReductionOp,
ThreadblockSwizzle,
Stages,
kAlignmentA,
kAlignmentB,
Operator
>;
using UnderlyingArguments = typename UnderlyingOperator::Arguments;
using GemmKernel = typename UnderlyingOperator::GemmKernel;
using ReductionKernel = typename UnderlyingOperator::ReductionKernel;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
typename ConvertScaledOp::Params convert;
typename ReductionOp::Params reduction;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1,
typename ConvertScaledOp::Params convert_ =
typename ConvertScaledOp::Params(),
typename ReductionOp::Params reduction_ =
typename ReductionOp::Params()
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
epilogue(epilogue_),
split_k_slices(split_k_slices),
convert(convert_),
reduction(reduction_) { }
};
private:
/// Kernel parameters object
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
GemmSplitKParallel() { }
  /// Helper to construct a transposed equivalent for the underlying GEMM operator
static UnderlyingArguments to_underlying_arguments(Arguments const &args) {
return UnderlyingArguments(
{args.problem_size.n(), args.problem_size.m(), args.problem_size.k()},
{args.ref_B.data(), args.ref_B.stride(0)},
{args.ref_A.data(), args.ref_A.stride(0)},
{args.ref_C.data(), args.ref_C.stride(0)},
{args.ref_D.data(), args.ref_D.stride(0)},
args.epilogue,
args.split_k_slices,
args.convert,
args.reduction
);
}
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
    Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 20,450 | C | 31.004695 | 120 | 0.663912 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/gemm_with_k_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a GEMM kernel that can reduce one of the input matrix
into a vector along the K dimension.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm_with_k_reduction.h"
#include "cutlass/gemm/kernel/default_gemm_with_k_reduction.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/device/gemm_universal_base.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*!
The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and
batched array variants.
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Reduce A or B operand along the K dimension
bool ReduceKForA_ = true,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Gather operand A by using an index array
bool GatherA = false,
/// Gather operand B by using an index array
bool GatherB = false,
/// Scatter result D by using an index array
bool ScatterD = false,
/// Permute result D
typename PermuteDLayout = layout::NoPermute
>
class GemmWithKReduction :
public GemmUniversalBase<
typename kernel::DefaultGemmWithKReduction<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
ElementC_,
LayoutC_,
ElementAccumulator_,
OperatorClass_,
ReduceKForA_,
ArchTag_,
ThreadblockShape_,
WarpShape_,
InstructionShape_,
EpilogueOutputOp_,
ThreadblockSwizzle_,
Stages,
Operator_,
SharedMemoryClearOption::kNone
>::GemmKernel
> {
public:
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static constexpr int kStages = Stages;
static constexpr int kAlignmentA = AlignmentA;
static constexpr int kAlignmentB = AlignmentB;
static constexpr int kAlignmentC = EpilogueOutputOp::kCount;
static constexpr ComplexTransform kTransformA = TransformA;
static constexpr ComplexTransform kTransformB = TransformB;
using Base = GemmUniversalBase<
typename kernel::DefaultGemmWithKReduction<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
ElementC_,
LayoutC_,
ElementAccumulator_,
OperatorClass_,
ReduceKForA_,
ArchTag_,
ThreadblockShape_,
WarpShape_,
InstructionShape_,
EpilogueOutputOp_,
ThreadblockSwizzle_,
Stages,
Operator_,
SharedMemoryClearOption::kNone
>::GemmKernel
>;
using Arguments = typename Base::Arguments;
using GemmKernel = typename Base::GemmKernel;
};
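//
// Illustrative sketch of instantiating this operator, assuming an Ampere
// tensor-op configuration. The element types, layouts, and flags below are
// example choices; construction of `args` (a Gemm::Arguments, inherited from
// the underlying universal GEMM) and of `workspace_ptr` and `stream` is
// elided and left to the caller.
//
//   using Gemm = cutlass::gemm::device::GemmWithKReduction<
//     cutlass::half_t, cutlass::layout::RowMajor,     // A
//     cutlass::half_t, cutlass::layout::ColumnMajor,  // B
//     float,           cutlass::layout::RowMajor,     // C and D
//     float,                                          // accumulator
//     cutlass::arch::OpClassTensorOp,
//     true,                                           // reduce operand A along K
//     cutlass::arch::Sm80>;
//
//   Gemm::Arguments args = /* ... */;
//   Gemm gemm_op;
//   cutlass::Status status = gemm_op.can_implement(args);
//   if (status == cutlass::Status::kSuccess) {
//     status = gemm_op(args, workspace_ptr, stream);  // initialize + run
//   }
//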
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output exchanges problem size and operands.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Reduce A or B operand along the K dimension
bool ReduceKForA_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// Operation performed by GEMM
typename Operator_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout
>
class GemmWithKReduction<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
ElementAccumulator_, OperatorClass_, ReduceKForA_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
Operator_, TransformA, TransformB, GatherA, GatherB, ScatterD, PermuteDLayout> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using UnderlyingOperator = typename GemmWithKReduction<
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
!ReduceKForA_,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
kAlignmentB,
kAlignmentA,
Operator,
kTransformB,
kTransformA,
GatherB,
GatherA,
ScatterD,
PermuteDLayout
>::Base;
using GemmKernel = typename UnderlyingOperator::GemmKernel;
static int const kAlignmentC = EpilogueOutputOp::kCount;
/// Argument structure
using Arguments = typename UnderlyingOperator::Arguments;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
GemmWithKReduction() = default;
  /// Helper to construct a transposed equivalent for the underlying GEMM operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem();
}
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 14,853 | C | 34.706731 | 102 | 0.683094 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/gemv.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Device-level interface wrapping a kernel-level GEMV.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/device/gemm_universal_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemvKernel_>
class Gemv {
public:
using GemvKernel = GemvKernel_;
using ElementA = typename GemvKernel::ElementA;
using LayoutA = typename GemvKernel::LayoutA;
using ElementB = typename GemvKernel::ElementB;
using ElementC = typename GemvKernel::ElementC;
using ElementAccumulator = typename GemvKernel::ElementAccumulator;
using EpilogueOutputOp = typename GemvKernel::EpilogueOutputOp;
static ComplexTransform const kTransformA = GemvKernel::kTransformA;
static ComplexTransform const kTransformB = GemvKernel::kTransformB;
static int const kThreadCount = GemvKernel::kThreadCount;
static int const kStages = GemvKernel::kStages;
static int const kAlignmentA = GemvKernel::kAlignmentA;
static int const kAlignmentB = GemvKernel::kAlignmentB;
static int const kAlignmentC = GemvKernel::kAlignmentC;
using Arguments = typename GemvKernel::Arguments;
using Params = typename GemvKernel::Params;
private:
Params params_;
public:
/// Constructs the Gemv.
Gemv() { }
/// Determines whether the Gemv can execute the given problem.
static Status can_implement(Arguments const &args) {
return GemvKernel::can_implement(args);
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return 0;
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
    // gridDim.z is limited to 65535, so keep the batch count within a 64K range
    return dim3((args.problem_size.row() + (kThreadCount - 1)) / kThreadCount, 1, args.batch_count % 65536);
}
/// Initializes Gemv state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
params_ = Params(args);
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return params_.update(args);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
dim3 grid = get_grid_shape(params_);
dim3 block(GemvKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename GemvKernel::SharedStorage));
// Launch
cutlass::Kernel<GemvKernel><<<grid, block, smem_size, stream>>>(params_);
//
// Query for errors
//
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
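//
// Illustrative sketch, assuming `MyGemvKernel` is a placeholder for a
// kernel-level GEMV type providing the nested Arguments/Params types and
// static members this wrapper expects. No workspace is needed since
// get_workspace_size() returns zero; `stream` is supplied by the caller.
//
//   using DeviceGemv = cutlass::gemm::device::Gemv<MyGemvKernel>;
//
//   DeviceGemv::Arguments args = /* problem size, batch count, tensor refs */;
//   DeviceGemv gemv_op;
//   cutlass::Status status = gemv_op.initialize(args);
//   if (status == cutlass::Status::kSuccess) {
//     status = gemv_op.run(stream);
//   }
//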
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 5,690 | C | 31.52 | 108 | 0.655888 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/base_grouped.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Base device-level grouped kernel.
*/
#pragma once
#include <algorithm>
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/trace.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM Grouped
template <typename BaseKernel_>
class BaseGrouped {
public:
using BaseKernel = BaseKernel_;
using ElementA = typename BaseKernel::ElementA;
using LayoutA = typename BaseKernel::LayoutA;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
static ComplexTransform const kTransformA = BaseKernel::kTransformA;
static int const kAlignmentA = BaseKernel::kAlignmentA;
using ElementB = typename BaseKernel::ElementB;
using LayoutB = typename BaseKernel::LayoutB;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
static ComplexTransform const kTransformB = BaseKernel::kTransformB;
static int const kAlignmentB = BaseKernel::kAlignmentB;
using ElementC = typename BaseKernel::ElementC;
using LayoutC = typename BaseKernel::LayoutC;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
static int const kAlignmentC = BaseKernel::kAlignmentC;
using ElementAccumulator = typename BaseKernel::Mma::Policy::Operator::ElementC;
using EpilogueOutputOp = typename BaseKernel::EpilogueOutputOp;
using ThreadblockSwizzle = typename BaseKernel::ThreadblockSwizzle;
using Operator = typename BaseKernel::Operator;
using WarpMmaOperator = typename BaseKernel::Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename WarpMmaOperator::MathOperator;
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
using ThreadblockShape = typename BaseKernel::Mma::Shape;
using WarpShape = typename BaseKernel::WarpShape;
using InstructionShape = typename BaseKernel::InstructionShape;
static int const kStages = BaseKernel::Mma::kStages;
/// Argument structure
using Arguments = typename BaseKernel::Arguments;
using ProblemInfo = typename BaseKernel::ProblemVisitor::ProblemInfo;
protected:
/// Kernel parameters object
typename BaseKernel::Params params_;
private:
/// Get the number of tiles across all problems in a group
static int32_t group_tile_count(const cutlass::gemm::GemmCoord* problem_sizes_ptr, int problem_count) {
int32_t tiles = 0;
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem = problem_sizes_ptr[i];
BaseKernel::ProblemVisitor::possibly_transpose_problem(problem);
tiles += problem_tile_count(problem);
}
return tiles;
}
/// Copy from `data` to `workspace`
Status copy_to_workspace(void* workspace, void* data, size_t bytes) {
cudaError_t cuda_error = cudaMemcpy(workspace, data, bytes, cudaMemcpyHostToDevice);
if (cuda_error != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
cuda_error = cudaGetLastError();
CUTLASS_TRACE_HOST(
" cudaMemcpy() returned error "
<< cudaGetErrorString(cuda_error));
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Precomputes scheduling information for the grouped GEMM
Status precompute(Arguments const &args, int32_t tile_count, void* workspace) {
size_t workspace_bytes = get_workspace_size(args);
std::vector<uint8_t> host_workspace(workspace_bytes);
BaseKernel::ProblemVisitor::host_precompute(args.host_problem_sizes,
args.problem_count,
args.threadblock_count,
(void*)host_workspace.data());
return copy_to_workspace(workspace, host_workspace.data(), workspace_bytes);
}
/// Reorder `data` according to `indices`
template <typename T>
static void reorder_array(T* data, const std::vector<size_t>& indices) {
// For now, simply create a copy of the data and then copy over to the original.
std::vector<T> copy(indices.size());
for (int i = 0; i < indices.size(); ++i) {
copy.at(i) = data[indices[i]];
}
memcpy(data, copy.data(), indices.size() * sizeof(T));
}
public:
/// Constructs the GEMM.
BaseGrouped() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return BaseKernel::can_implement(args);
}
/// Get the number of tiles in a problem
static int32_t problem_tile_count(cutlass::gemm::GemmCoord const &problem) {
auto grid = BaseKernel::ProblemVisitor::grid_shape(problem);
return BaseKernel::ProblemVisitor::tile_count(grid);
}
/// Get the number of tiles across all problems in a group
static int32_t group_tile_count(Arguments const &args) {
if (args.host_problem_sizes == nullptr) {
CUTLASS_TRACE_HOST("Received nullptr for `args.host_problem_sizes");
return -1;
}
return group_tile_count(args.host_problem_sizes, args.problem_count);
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) {
return BaseKernel::ProblemVisitor::get_workspace_size(args.host_problem_sizes,
args.problem_count,
args.threadblock_count);
} else {
return 0;
}
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return dim3(args.threadblock_count, 1, 1);
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
CUTLASS_TRACE_HOST("BaseGrouped::maximum_active_blocks()");
int smem_size = int(sizeof(typename BaseKernel::SharedStorage));
CUTLASS_TRACE_HOST(" smem_size: " << smem_size << " bytes");
cudaError_t result;
if (smem_size > (48 << 10)) {
result = cudaFuncSetAttribute(Kernel<BaseKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
result = cudaGetLastError();
CUTLASS_TRACE_HOST(
" cudaFuncSetAttribute() returned error "
<< cudaGetErrorString(result));
return -1;
}
}
int max_active_blocks = -1;
result = cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks,
Kernel<BaseKernel>,
BaseKernel::kThreadCount,
smem_size);
if (result != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
result = cudaGetLastError();
CUTLASS_TRACE_HOST(
" cudaOccupancyMaxActiveBlocksPerMultiprocessor() returned error "
<< cudaGetErrorString(result));
return -1;
}
CUTLASS_TRACE_HOST(" max_active_blocks: " << max_active_blocks);
return max_active_blocks;
}
/// Sorts each pointer passed in according to the indices that sort
/// `problem_sizes_ptr` in descending order of problem-K dimension.
static void sort_problems(int problem_count,
cutlass::gemm::GemmCoord* problem_sizes_ptr,
int64_t* lda_host_ptr,
int64_t* ldb_host_ptr,
int64_t* ldc_host_ptr,
int64_t* ldd_host_ptr,
int64_t* offset_A_ptr,
int64_t* offset_B_ptr,
int64_t* offset_C_ptr,
int64_t* offset_D_ptr)
{
std::vector<size_t> indices(problem_count);
std::iota(indices.begin(), indices.end(), 0);
std::stable_sort(indices.begin(), indices.end(),
[&problem_sizes_ptr](size_t i, size_t j) {
return problem_sizes_ptr[i].k() > problem_sizes_ptr[j].k();
});
reorder_array(problem_sizes_ptr, indices);
reorder_array(lda_host_ptr, indices);
reorder_array(ldb_host_ptr, indices);
reorder_array(ldc_host_ptr, indices);
reorder_array(ldd_host_ptr, indices);
reorder_array(offset_A_ptr, indices);
reorder_array(offset_B_ptr, indices);
reorder_array(offset_C_ptr, indices);
reorder_array(offset_D_ptr, indices);
}
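  //
  // Illustrative call sketch: `problem_sizes`, `lda` .. `ldd`, and the four
  // offset arrays are assumed to be host-side std::vector instances with one
  // entry per problem, populated by the caller before launch.
  //
  //   BaseGrouped::sort_problems(problem_count,
  //                              problem_sizes.data(),
  //                              lda.data(), ldb.data(), ldc.data(), ldd.data(),
  //                              offset_A.data(), offset_B.data(),
  //                              offset_C.data(), offset_D.data());
  //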
/// Computes the number of threadblocks to launch for the grouped kernel
static int sufficient(const cutlass::gemm::GemmCoord* problem_sizes_ptr=nullptr,
int problem_count=0,
int available_sm_count=-1) {
// Determine the number of blocks that would be launched to fill up a single
// wave on the GPU with each SM having maximum occupancy.
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
result = cudaGetLastError();
CUTLASS_TRACE_HOST(" cudaGetDevice() returned error "
<< cudaGetErrorString(result));
return 0;
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
result = cudaGetLastError();
CUTLASS_TRACE_HOST(" cudaGetDeviceProperties() returned error "
<< cudaGetErrorString(result));
return 0;
}
bool override_sm_count = (available_sm_count < 0 || available_sm_count > properties.multiProcessorCount);
if (override_sm_count) {
available_sm_count = properties.multiProcessorCount;
}
int max_active_blocks = maximum_active_blocks();
if (max_active_blocks <= 0) {
return 0;
}
int occupancy_based_block_count = available_sm_count * max_active_blocks;
if (problem_sizes_ptr == nullptr || problem_count == 0) {
return occupancy_based_block_count;
}
int total_tiles = group_tile_count(problem_sizes_ptr, problem_count);
// If the group contains a single problem, launching the exact number of
// threadblocks needed to cover the problem minimizes the work performed
// per threadblock in finding the next tile to compute. We return total_tiles
// unless the user has provided the SM count.
if (problem_count == 1 && override_sm_count) {
return total_tiles;
}
// Choose between the full wave of threadblocks and the tile count. If there
// are fewer tiles in the group than threadblocks in the full wave, only
// some threadblocks will be assigned tiles. Those threadblocks
// which are not assigned tiles still need to perform the work of iterating through
// problem sizes to determine that they have no work to do. This competes for cycles
// with those threadblocks that are assigned tiles to compute.
return min(total_tiles, occupancy_based_block_count);
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
CUTLASS_TRACE_HOST("BaseGrouped::initialize() - workspace "
<< workspace << ", stream: " << (stream ? "non-null" : "null"));
// Workspace
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) {
int32_t tile_count = group_tile_count(args);
Status status = precompute(args, tile_count, workspace);
if (status != Status::kSuccess) {
return status;
}
params_ = typename BaseKernel::Params(args, workspace, tile_count);
} else {
params_ = typename BaseKernel::Params(args, workspace);
}
// Specify shared memory capacity for kernel.
int smem_size = int(sizeof(typename BaseKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<BaseKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
if (BaseKernel::ProblemVisitor::kRequiresPrecomputation) {
int32_t tile_count = group_tile_count(args);
Status status = precompute(args, tile_count, workspace);
if (status != Status::kSuccess) {
return status;
}
params_.update(args, workspace, tile_count);
} else {
params_.update(args, workspace);
}
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
//
// Configure grid and block dimensions
//
if (!params_.problem_visitor.problem_count) {
return Status::kSuccess;
}
dim3 grid(params_.threadblock_count, 1, 1);
dim3 block(BaseKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename BaseKernel::SharedStorage));
//
// Launch kernel
//
// Launch
cutlass::Kernel<BaseKernel><<<grid, block, smem_size, stream>>>(params_);
//
// Query for errors
//
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
// Call cudaGetLastError() to clear the error bit
result = cudaGetLastError();
CUTLASS_TRACE_HOST(" grid launch failed with error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Initializes and runs the kernel.
Status operator()(
Arguments const &args,
void *workspace,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
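//
// Illustrative host-side flow shared by grouped GEMMs built on this base
// (for instance cutlass::gemm::device::GemmGrouped). `GroupedGemm` stands for
// such a concrete instantiation; `host_problem_sizes`, `problem_count`,
// `stream`, and the populated `args` are supplied by the caller.
//
//   int threadblock_count =
//       GroupedGemm::sufficient(host_problem_sizes, problem_count);
//
//   GroupedGemm::Arguments args = /* problem sizes, pointer arrays,
//                                    leading dimensions, threadblock_count */;
//
//   size_t workspace_bytes = GroupedGemm::get_workspace_size(args);
//   cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);
//
//   GroupedGemm gemm_op;
//   cutlass::Status status = gemm_op.initialize(args, workspace.get());
//   if (status == cutlass::Status::kSuccess) {
//     status = gemm_op.run(stream);
//   }
//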
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 17,023 | C | 34.466667 | 109 | 0.644422 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/gemm_universal_base.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief The universal GEMM accommodates streamk, batched strided, and batched array variants.
*/
#pragma once
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemmKernel_>
class GemmUniversalBase {
public:
using GemmKernel = GemmKernel_;
using ThreadblockShape = typename GemmKernel::Mma::Shape;
using ElementA = typename GemmKernel::ElementA;
using LayoutA = typename GemmKernel::LayoutA;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
static ComplexTransform const kTransformA = GemmKernel::kTransformA;
using ElementB = typename GemmKernel::ElementB;
using LayoutB = typename GemmKernel::LayoutB;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
static ComplexTransform const kTransformB = GemmKernel::kTransformB;
using ElementC = typename GemmKernel::ElementC;
using LayoutC = typename GemmKernel::LayoutC;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
/// Numerical accumulation element type
using ElementAccumulator = typename GemmKernel::Mma::ElementC;
using EpilogueOutputOp = typename GemmKernel::EpilogueOutputOp;
using ThreadblockSwizzle = typename GemmKernel::ThreadblockSwizzle;
using Operator = typename GemmKernel::Operator;
/// Argument structure
using Arguments = typename GemmKernel::Arguments;
protected:
//
// Device properties (uniform across all instances of the current thread)
//
  /// Device ordinal
thread_local static int device_ordinal_;
/// Device SM count
thread_local static int device_sms_;
/// Kernel SM occupancy (in thread blocks)
thread_local static int sm_occupancy_;
/// Initialize static thread-local members for the thread's current device,
/// if necessary.
static Status init_device_props()
{
CUTLASS_TRACE_HOST("GemmUniversalBase::init_device_props()");
cudaError_t cudart_result;
// Get current device ordinal
int current_ordinal;
cudart_result = cudaGetDevice(¤t_ordinal);
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaGetDevice() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
// Done if matches the current static member
if (current_ordinal == device_ordinal_) {
// Already initialized
return Status::kSuccess;
}
// Update SM count member
cudart_result = cudaDeviceGetAttribute (&device_sms_, cudaDevAttrMultiProcessorCount, current_ordinal);
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaDeviceGetAttribute() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
// Update the kernel function's shared memory configuration for the current device
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10))
{
// Requires more than 48KB: configure for extended, dynamic shared memory
cudart_result = cudaFuncSetAttribute(
Kernel2<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaFuncSetAttribute() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
cudart_result = cudaFuncSetAttribute(
Kernel2<GemmKernel>,
cudaFuncAttributePreferredSharedMemoryCarveout, 100); // 100% shared memory
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaFuncSetAttribute() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
}
// Update SM occupancy member
cudart_result = cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
&sm_occupancy_,
Kernel2<GemmKernel>,
GemmKernel::kThreadCount,
int(sizeof(typename GemmKernel::SharedStorage)),
cudaOccupancyDisableCachingOverride);
if (cudart_result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags() returned error " << cudaGetErrorString(cudart_result));
return Status::kErrorInternal;
}
// Update device ordinal member on success
device_ordinal_ = current_ordinal;
CUTLASS_TRACE_HOST(" "
"device_ordinal: (" << device_ordinal_ << "), "
"device_sms: (" << device_sms_ << "), "
"sm_occupancy: (" << sm_occupancy_ << ")");
return Status::kSuccess;
}
protected:
//
// Instance data members
//
/// Kernel parameters
typename GemmKernel::Params params_;
/// Initialize params member
Status init_params(Arguments const &args)
{
// Initialize static device properties, if necessary
Status result = init_device_props();
if (result != Status::kSuccess) {
return result;
}
// Initialize params member
params_ = typename GemmKernel::Params(args, device_sms_, sm_occupancy_);
return Status::kSuccess;
}
public:
//---------------------------------------------------------------------------------------------
// Stateless API
//---------------------------------------------------------------------------------------------
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::can_implement()");
// Initialize static kernel and device properties, if necessary.
Status result = init_device_props();
if (result != Status::kSuccess) {
return result;
}
dim3 grid = get_grid_shape(args);
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max()))
{
return Status::kErrorInvalidProblem;
}
return GemmKernel::can_implement(args);
}
/// Returns the workspace size (in bytes) needed for the problem
/// geometry expressed by these arguments
static size_t get_workspace_size(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::get_workspace_size()");
// Initialize parameters from args
GemmUniversalBase base;
if (base.init_params(args) != Status::kSuccess) {
return 0;
}
// Get size from parameters
size_t workspace_bytes = base.params_.get_workspace_size();
CUTLASS_TRACE_HOST(" workspace_bytes: " << workspace_bytes);
return workspace_bytes;
}
/// Returns the grid extents in thread blocks to launch
static dim3 get_grid_shape(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::get_grid_shape()");
// Initialize parameters from args
GemmUniversalBase base;
if (base.init_params(args) != Status::kSuccess) {
return dim3(0,0,0);
}
// Get dims from parameters
dim3 grid_dims = base.params_.get_grid_dims();
CUTLASS_TRACE_HOST(
" tiled_shape: " << base.params_.get_tiled_shape() << "\n"
<< " grid_dims: {" << grid_dims << "}");
return grid_dims;
}
/// Returns the maximum number of active thread blocks per multiprocessor
static int maximum_active_blocks()
{
CUTLASS_TRACE_HOST("GemmUniversalBase::maximum_active_blocks()");
// Initialize static device properties, if necessary
if (init_device_props() != Status::kSuccess) {
return -1;
}
CUTLASS_TRACE_HOST(" max_active_blocks: " << sm_occupancy_);
return sm_occupancy_;
}
//---------------------------------------------------------------------------------------------
// Stateful API
//---------------------------------------------------------------------------------------------
/// Initializes GEMM state from arguments and workspace memory
Status initialize(
Arguments const &args,
void *workspace,
cudaStream_t stream = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::initialize() - workspace "
<< workspace << ", stream: " << (stream ? "non-null" : "null"));
// Initialize parameters from args
Status result = init_params(args);
if (result != Status::kSuccess) {
return result;
}
// Assign and prepare workspace memory
return params_.init_workspace(workspace, stream);
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed to
/// remain the same.
Status update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalBase()::update()");
params_.update(args);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversalBase::run()");
// Configure grid and block dimensions
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
dim3 block(GemmKernel::kThreadCount, 1, 1);
dim3 grid = params_.get_grid_dims();
// Launch kernel
CUTLASS_TRACE_HOST(" "
"grid: (" << grid << "), "
"block: (" << block << "), "
"SMEM: (" << smem_size << ")");
Kernel2<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
// Query for errors
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" grid launch failed with error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr)
{
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr)
{
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
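//
// Illustrative usage sketch, assuming `Gemm` is a device-level operator built
// on this base (for example an instantiation of
// cutlass::gemm::device::GemmUniversal). Construction of `args`, which
// carries the mode, problem size, batch count, epilogue scalars, pointers,
// and strides, is elided; `stream` and the device_memory helper are example
// choices by the caller.
//
//   Gemm::Arguments args = /* ... */;
//
//   if (Gemm::can_implement(args) == cutlass::Status::kSuccess) {
//     size_t workspace_bytes = Gemm::get_workspace_size(args);
//     cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);
//
//     Gemm gemm_op;
//     cutlass::Status status = gemm_op.initialize(args, workspace.get(), stream);
//     if (status == cutlass::Status::kSuccess) {
//       status = gemm_op.run(stream);
//     }
//   }
//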
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Static initializers
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Device ordinal
template <typename GemmKernel_>
thread_local int GemmUniversalBase<GemmKernel_>::device_ordinal_ = -1;
/// Device SM count
template <typename GemmKernel_>
thread_local int GemmUniversalBase<GemmKernel_>::device_sms_ = -1;
/// Kernel SM occupancy (in thread blocks)
template <typename GemmKernel_>
thread_local int GemmUniversalBase<GemmKernel_>::sm_occupancy_ = -1;
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,089 | C | 31.00489 | 140 | 0.631523 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/device/gemm_universal_adapter.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and
batched array variants.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal_base.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemmKernel_>
class GemmUniversalAdapter {
public:
using GemmKernel = GemmKernel_;
static bool const kInternalTranspose =
platform::is_same<typename GemmKernel::LayoutC, cutlass::layout::RowMajor>::value;
using ThreadblockShape = typename GemmKernel::Mma::Shape;
using WarpShape = typename GemmKernel::WarpShape;
using InstructionShape = typename GemmKernel::InstructionShape;
// warp-level, arch-level (instruction), math operator
using WarpMmaOperator = typename GemmKernel::Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename WarpMmaOperator::MathOperator;
// Operator class and arch tag extract bottom-up
// set it for top-level gemm device-level template
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
// Type, layout, and complex transform deliberately exchanged with B
using MapArguments = kernel::detail::MapArguments<
typename GemmKernel::ElementA,
typename GemmKernel::LayoutA,
GemmKernel::kTransformA,
GemmKernel::kAlignmentA,
typename GemmKernel::ElementB,
typename GemmKernel::LayoutB,
GemmKernel::kTransformB,
GemmKernel::kAlignmentB,
typename GemmKernel::LayoutC,
kInternalTranspose
>;
using ElementA = typename MapArguments::ElementA;
using LayoutA = typename MapArguments::LayoutA;
static ComplexTransform const kTransformA = MapArguments::kTransformA;
static int const kAlignmentA = MapArguments::kAlignmentA;
using ElementB = typename MapArguments::ElementB;
using LayoutB = typename MapArguments::LayoutB;
static ComplexTransform const kTransformB = MapArguments::kTransformB;
static int const kAlignmentB = MapArguments::kAlignmentB;
using ElementC = typename GemmKernel::ElementC;
using LayoutC = typename MapArguments::LayoutC;
static int const kAlignmentC = GemmKernel::kAlignmentC;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
static int const kStages = GemmKernel::Mma::kStages;
using EpilogueOutputOp = typename GemmKernel::EpilogueOutputOp;
using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator;
using ThreadblockSwizzle = typename GemmKernel::ThreadblockSwizzle;
using UnderlyingOperator = GemmUniversalBase<GemmKernel>;
using Arguments = typename UnderlyingOperator::Arguments;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
GemmUniversalAdapter() { }
  /// Helper to construct a transposed equivalent for the underlying GEMM operator
static Arguments to_underlying_arguments(Arguments const &args) {
if (kInternalTranspose) {
return args.transposed_problem();
}
else {
return args;
}
}
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed to
/// remain the same.
Status update(Arguments const &args) {
return underlying_operator_.update(to_underlying_arguments(args));
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
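//
// Illustrative sketch: the adapter is typically instantiated over an existing
// kernel-level type, e.g. one produced by kernel::DefaultGemmUniversal.
// `MyGemmKernel`, `args`, `workspace_ptr`, and `stream` are placeholders
// supplied by the caller.
//
//   using DeviceGemm = cutlass::gemm::device::GemmUniversalAdapter<MyGemmKernel>;
//
//   DeviceGemm::Arguments args = /* ... */;
//   DeviceGemm gemm_op;
//   cutlass::Status status = gemm_op(args, workspace_ptr, stream);  // initialize + run
//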
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,444 | C | 35.674877 | 102 | 0.692773 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_splitk_parallel.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/gemm_splitk_parallel.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator
>
struct DefaultGemmSplitKParallel {
/// Define the threadblock-scoped matrix multiply-accumulate using the basic GEMM's
/// mainloop.
using Default = DefaultGemm<
ElementA_,
LayoutA_,
kAlignmentA,
ElementB_,
LayoutB_,
kAlignmentB,
ElementAccumulator,
LayoutC_,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
false,
Operator
>;
/// Define the matrix multiply operator
using Mma = typename Default::Mma;
/// Define the epilogue
using Epilogue = typename Default::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::GemmSplitKParallel<Mma, Epilogue, ThreadblockSwizzle>;
};
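// Usage sketch (illustrative only -- the element types, tile shapes, epilogue, and stage count
// below are assumptions chosen for the example, not requirements of this header):
//
//   using DefaultKernel = cutlass::gemm::kernel::DefaultGemmSplitKParallel<
//       cutlass::half_t, cutlass::layout::RowMajor,    8,    // A: element, layout, alignment
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,    // B: element, layout, alignment
//       float,           cutlass::layout::RowMajor,          // C/D: element, layout
//       float,                                                // accumulator element
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,               // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,                 // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,                  // instruction shape
//       cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                    // pipeline stages
//       cutlass::arch::OpMultiplyAdd>;
//
//   using GemmKernel = typename DefaultKernel::GemmKernel;    // kernel::GemmSplitKParallel<...>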
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| 4,932 | C | 35.007299 | 100 | 0.660178 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Kernel-level GEMM operating on planar-complex matrices, in which the real and
      imaginary parts of each operand are stored as separate planes.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmPlanarComplex {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using Operator = typename Mma::Operator;
using ArchTag = typename Mma::ArchTag;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value);
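  // For example (illustrative): with 16-bit A and B elements this evaluates to 128 / 16 = 8,
  // so each split of the K dimension is kept a multiple of 8 elements; with 64-bit elements
  // the alignment would be 2.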
//
// Additional types needed for reflection
//
using ElementAccumulator = typename Mma::Policy::Operator::ElementC;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::Shape;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
//
// Arguments structure
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A_real;
void const * ptr_A_imag;
void const * ptr_B_real;
void const * ptr_B_imag;
void const * ptr_C_real;
void const * ptr_C_imag;
void * ptr_D_real;
void * ptr_D_imag;
typename LayoutA::Stride::Index lda_real;
typename LayoutA::Stride::Index lda_imag;
typename LayoutB::Stride::Index ldb_real;
typename LayoutB::Stride::Index ldb_imag;
typename LayoutC::Stride::Index ldc_real;
typename LayoutC::Stride::Index ldc_imag;
typename LayoutC::Stride::Index ldd_real;
typename LayoutC::Stride::Index ldd_imag;
int64_t batch_stride_A;
int64_t batch_stride_A_imag;
int64_t batch_stride_B;
int64_t batch_stride_B_imag;
int64_t batch_stride_C;
int64_t batch_stride_C_imag;
int64_t batch_stride_D_imag;
//
// Methods
//
Arguments() :
ptr_A_real(nullptr),
ptr_A_imag(nullptr),
ptr_B_real(nullptr),
ptr_B_imag(nullptr),
ptr_C_real(nullptr),
ptr_C_imag(nullptr),
ptr_D_real(nullptr),
ptr_D_imag(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A_real,
void const * ptr_A_imag,
void const * ptr_B_real,
void const * ptr_B_imag,
void const * ptr_C_real,
void const * ptr_C_imag,
void * ptr_D_real,
void * ptr_D_imag,
typename LayoutA::Stride::Index lda_real,
typename LayoutA::Stride::Index lda_imag,
typename LayoutB::Stride::Index ldb_real,
typename LayoutB::Stride::Index ldb_imag,
typename LayoutC::Stride::Index ldc_real,
typename LayoutC::Stride::Index ldc_imag,
typename LayoutC::Stride::Index ldd_real,
typename LayoutC::Stride::Index ldd_imag,
int64_t batch_stride_A = 0,
int64_t batch_stride_A_imag = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_B_imag = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_C_imag = 0,
int64_t batch_stride_D = 0,
int64_t batch_stride_D_imag = 0)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A_real(ptr_A_real),
ptr_A_imag(ptr_A_imag),
ptr_B_real(ptr_B_real),
ptr_B_imag(ptr_B_imag),
ptr_C_real(ptr_C_real),
ptr_C_imag(ptr_C_imag),
ptr_D_real(ptr_D_real),
ptr_D_imag(ptr_D_imag),
lda_real(lda_real),
lda_imag(lda_imag),
ldb_real(ldb_real),
ldb_imag(ldb_imag),
ldc_real(ldc_real),
ldc_imag(ldc_imag),
ldd_real(ldd_real),
ldd_imag(ldd_imag),
batch_stride_A(batch_stride_A),
batch_stride_A_imag(batch_stride_A_imag),
batch_stride_B(batch_stride_B),
batch_stride_B_imag(batch_stride_B_imag),
batch_stride_C(batch_stride_C),
batch_stride_C_imag(batch_stride_C_imag),
batch_stride_D_imag(batch_stride_D_imag)
{}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A_real, args.ptr_B_real);
std::swap(args.ptr_A_imag, args.ptr_B_imag);
std::swap(args.lda_real, args.ldb_real);
std::swap(args.lda_imag, args.ldb_imag);
std::swap(args.batch_stride_A, args.batch_stride_B);
std::swap(args.batch_stride_A_imag, args.batch_stride_B_imag);
return args;
}
};
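  // Host-side construction sketch (illustrative; the pointers, leading dimensions, and scalars
  // are placeholders assumed for the example). Planar-complex operands pass their real and
  // imaginary planes as separate pointers with independent leading dimensions:
  //
  //   Arguments args(
  //     GemmUniversalMode::kGemm,
  //     {m, n, k},                                  // problem size
  //     1,                                          // batch count / split-K slices
  //     {alpha, beta},                              // epilogue output operator parameters
  //     A_real, A_imag, B_real, B_imag,
  //     C_real, C_imag, D_real, D_imag,
  //     lda, lda, ldb, ldb, ldc, ldc, ldd, ldd);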
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>;
//
// Data members
//
typename Mma::IteratorA::Params params_A_real;
typename Mma::IteratorA::Params params_A_imag;
typename Mma::IteratorB::Params params_B_real;
typename Mma::IteratorB::Params params_B_imag;
typename Epilogue::OutputTileIterator::Params params_C_real;
typename Epilogue::OutputTileIterator::Params params_C_imag;
typename Epilogue::OutputTileIterator::Params params_D_real;
typename Epilogue::OutputTileIterator::Params params_D_imag;
typename EpilogueOutputOp::Params output_op;
void * ptr_A_real;
void * ptr_A_imag;
void * ptr_B_real;
void * ptr_B_imag;
void * ptr_C_real;
void * ptr_C_imag;
void * ptr_D_real;
void * ptr_D_imag;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_A_imag;
int64_t batch_stride_B_imag;
int64_t batch_stride_C_imag;
int64_t batch_stride_D_imag;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A_real(args.lda_real),
params_A_imag(args.lda_imag),
params_B_real(args.ldb_real),
params_B_imag(args.ldb_imag),
params_C_real(args.ldc_real),
params_C_imag(args.ldc_imag),
params_D_real(args.ldd_real),
params_D_imag(args.ldd_imag),
output_op(args.epilogue),
ptr_A_real(const_cast<void *>(args.ptr_A_real)),
ptr_A_imag(const_cast<void *>(args.ptr_A_imag)),
ptr_B_real(const_cast<void *>(args.ptr_B_real)),
ptr_B_imag(const_cast<void *>(args.ptr_B_imag)),
ptr_C_real(const_cast<void *>(args.ptr_C_real)),
ptr_C_imag(const_cast<void *>(args.ptr_C_imag)),
ptr_D_real(args.ptr_D_real),
ptr_D_imag(args.ptr_D_imag),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_A_imag(args.batch_stride_A_imag),
batch_stride_B_imag(args.batch_stride_B_imag),
batch_stride_C_imag(args.batch_stride_C_imag),
batch_stride_D_imag(args.batch_stride_D_imag)
{}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = ParamsBase::get_workspace_size();
if (this->mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Double the size returned by the base class because we need to
// accumulate two ElementC components
workspace_bytes *= 2;
}
return workspace_bytes;
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
void update(Arguments const &args)
{
ptr_A_real = const_cast<void *>(args.ptr_A_real);
ptr_A_imag = const_cast<void *>(args.ptr_A_imag);
ptr_B_real = const_cast<void *>(args.ptr_B_real);
ptr_B_imag = const_cast<void *>(args.ptr_B_imag);
ptr_C_real = const_cast<void *>(args.ptr_C_real);
ptr_C_imag = const_cast<void *>(args.ptr_C_imag);
ptr_D_real = const_cast<void *>(args.ptr_D_real);
ptr_D_imag = const_cast<void *>(args.ptr_D_imag);
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(Arguments const &args)
{
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = args.problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = args.problem_size.m() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = args.problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = args.problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = args.problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = args.problem_size.m() % kAlignmentC;
}
if (isAMisaligned || isBMisaligned || isCMisaligned) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
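  // Worked example of the check above (illustrative): with 16-bit operands accessed 128 bits at
  // a time, kAlignmentA == kAlignmentB == 8, so a row-major A requires k % 8 == 0 while a
  // column-major A requires m % 8 == 0; any violation yields Status::kErrorMisalignedOperand.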
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmPlanarComplex op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A_real = static_cast<ElementA *>(params.ptr_A_real);
ElementA *ptr_A_imag = static_cast<ElementA *>(params.ptr_A_imag);
ElementB *ptr_B_real = static_cast<ElementB *>(params.ptr_B_real);
ElementB *ptr_B_imag = static_cast<ElementB *>(params.ptr_B_imag);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_A;
ptr_A_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_A_imag;
ptr_B_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_B;
ptr_B_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_B_imag;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A_real = static_cast<ElementA * const *>(params.ptr_A_real)[threadblock_tile_offset.k()];
ptr_A_imag = static_cast<ElementA * const *>(params.ptr_A_imag)[threadblock_tile_offset.k()];
ptr_B_real = static_cast<ElementB * const *>(params.ptr_B_real)[threadblock_tile_offset.k()];
ptr_B_imag = static_cast<ElementB * const *>(params.ptr_B_imag)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A_real(
params.params_A_real,
ptr_A_real,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorA iterator_A_imag(
params.params_A_imag,
ptr_A_imag,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B_real(
params.params_B_real,
ptr_B_real,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorB iterator_B_imag(
params.params_B_imag,
ptr_B_imag,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
    // Compute the number of threadblock-scoped mainloop iterations for this split of the K dimension
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
accumulators);
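    // The threadblock-scoped Mma consumes the four real-valued operand streams above and forms
    // the planar-complex product, conceptually
    //   (Ar + i*Ai) * (Br + i*Bi) = (Ar*Br - Ai*Bi) + i*(Ar*Bi + Ai*Br),
    // subject to the kTransformA / kTransformB conjugation settings.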
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C_real = static_cast<ElementC *>(params.ptr_C_real);
ElementC *ptr_C_imag = static_cast<ElementC *>(params.ptr_C_imag);
ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real);
ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D_real += threadblock_tile_offset.k() * params.batch_stride_D;
ptr_D_imag += threadblock_tile_offset.k() * params.batch_stride_D_imag;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_C;
ptr_C_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_C_imag;
ptr_D_real += int64_t(threadblock_tile_offset.k()) * params.batch_stride_D;
ptr_D_imag += int64_t(threadblock_tile_offset.k()) * params.batch_stride_D_imag;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C_real = static_cast<ElementC * const *>(params.ptr_C_real)[threadblock_tile_offset.k()];
ptr_C_imag = static_cast<ElementC * const *>(params.ptr_C_imag)[threadblock_tile_offset.k()];
ptr_D_real = static_cast<ElementC * const *>(params.ptr_D_real)[threadblock_tile_offset.k()];
ptr_D_imag = static_cast<ElementC * const *>(params.ptr_D_imag)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C_real(
params.params_C_real,
ptr_C_real,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C_imag(
params.params_C_imag,
ptr_C_imag,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D_real(
params.params_D_real,
ptr_D_real,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_D_imag(
params.params_D_imag,
ptr_D_imag,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
//
// Construct epilogue
//
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C_real = iterator_D_real;
iterator_C_imag = iterator_D_imag;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D_real,
iterator_D_imag,
accumulators,
iterator_C_real,
iterator_C_imag);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 22,973 | C | 31.086592 | 108 | 0.636747 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Grouped GEMM kernel that fuses the application of a previously computed softmax
      (per-row norm and sum vectors) into its mainloop.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
bool Transposed = false
>
struct GemmGroupedSoftmaxMainloopFusion {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static bool const kTransposed = Transposed;
// Optional transpose
using MapArguments = kernel::detail::MapArguments<
typename Mma::IteratorA::Element,
typename Mma::IteratorA::Layout,
Mma::kTransformA,
Mma::IteratorA::AccessType::kElements,
typename Mma::IteratorB::Element,
typename Mma::IteratorB::Layout,
Mma::kTransformB,
Mma::IteratorB::AccessType::kElements,
typename Mma::LayoutC,
kTransposed
>;
// Public-facing type definitions related to operand element type, layout, and complex conjugate
// operation. Must interact with the 'kTransposed' notion.
using ElementA = typename MapArguments::ElementA;
using LayoutA = typename MapArguments::LayoutA;
using ElementB = typename MapArguments::ElementB;
using LayoutB = typename MapArguments::LayoutB;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename MapArguments::LayoutC;
using ElementScaleBias = typename Mma::IteratorNormSum::Element;
static ComplexTransform const kTransformA = MapArguments::kTransformA;
static ComplexTransform const kTransformB = MapArguments::kTransformB;
// Type definitions about the mainloop.
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = MapArguments::kAlignmentA;
static int const kAlignmentB = MapArguments::kAlignmentB;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ProblemVisitor = GemmGroupedProblemVisitor<
ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount,
kTransposed>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord *problem_sizes;
int problem_count;
int threadblock_count;
typename EpilogueOutputOp::Params output_op;
ElementA ** ptr_A;
ElementB ** ptr_B;
ElementC ** ptr_C;
ElementC ** ptr_D;
void ** ptr_norm;
void ** ptr_sum;
typename LayoutA::Stride::LongIndex *lda;
typename LayoutB::Stride::LongIndex *ldb;
typename LayoutC::Stride::LongIndex *ldc;
typename LayoutC::Stride::LongIndex *ldd;
// Only used by device-level operator
GemmCoord *host_problem_sizes;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments():
problem_count(0),
threadblock_count(0),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
ptr_norm(nullptr),
ptr_sum(nullptr),
lda(nullptr),
ldb(nullptr),
ldc(nullptr),
ldd(nullptr),
host_problem_sizes(nullptr)
{
}
/// Ctor
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord *problem_sizes,
int problem_count,
int threadblock_count,
typename EpilogueOutputOp::Params output_op,
ElementA ** ptr_A,
ElementB ** ptr_B,
ElementC ** ptr_C,
ElementC ** ptr_D,
void ** ptr_norm,
void ** ptr_sum,
typename LayoutA::Stride::LongIndex *lda,
typename LayoutB::Stride::LongIndex *ldb,
typename LayoutC::Stride::LongIndex *ldc,
typename LayoutC::Stride::LongIndex *ldd,
GemmCoord *host_problem_sizes=nullptr
):
problem_sizes(problem_sizes),
problem_count(problem_count),
threadblock_count(threadblock_count),
output_op(output_op),
ptr_A(ptr_A),
ptr_B(ptr_B),
ptr_C(ptr_C),
ptr_D(ptr_D),
ptr_norm(ptr_norm),
ptr_sum(ptr_sum),
lda(lda),
ldb(ldb),
ldc(ldc),
ldd(ldd),
host_problem_sizes(host_problem_sizes)
{
}
};
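  // Host-side sketch (illustrative; the device-resident arrays below are placeholders). Every
  // per-problem quantity is a device array with one entry per problem, and ptr_norm[i] /
  // ptr_sum[i] point to problem i's precomputed softmax norm and sum vectors:
  //
  //   Arguments args(problem_sizes_device,          // GemmCoord[problem_count]
  //                  problem_count,
  //                  threadblock_count,
  //                  {alpha, beta},
  //                  ptr_A_device, ptr_B_device, ptr_C_device, ptr_D_device,
  //                  ptr_norm_device, ptr_sum_device,
  //                  lda_device, ldb_device, ldc_device, ldd_device);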
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
typename ProblemVisitor::Params problem_visitor;
int threadblock_count;
typename EpilogueOutputOp::Params output_op;
ElementA ** ptr_A;
ElementB ** ptr_B;
ElementC ** ptr_C;
ElementC ** ptr_D;
void ** ptr_norm;
void ** ptr_sum;
typename LayoutA::Stride::LongIndex *lda;
typename LayoutB::Stride::LongIndex *ldb;
typename LayoutC::Stride::LongIndex *ldc;
typename LayoutC::Stride::LongIndex *ldd;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
ptr_norm(nullptr),
ptr_sum(nullptr),
lda(nullptr),
ldb(nullptr),
ldc(nullptr),
ldd(nullptr)
{ }
CUTLASS_HOST_DEVICE
Params(Arguments const &args,
void *workspace = nullptr,
int tile_count = 0):
problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
output_op(args.output_op),
ptr_A(args.ptr_A),
ptr_B(args.ptr_B),
ptr_C(args.ptr_C),
ptr_D(args.ptr_D),
ptr_norm(args.ptr_norm),
ptr_sum(args.ptr_sum),
lda(args.lda),
ldb(args.ldb),
ldc(args.ldc),
ldd(args.ldd)
{
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr,
int tile_count = 0) {
problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count,
workspace, tile_count);
threadblock_count = args.threadblock_count;
output_op = args.output_op;
ptr_A = args.ptr_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
ptr_norm = args.ptr_norm;
ptr_sum = args.ptr_sum;
lda = args.lda;
ldb = args.ldb;
ldc = args.ldc;
ldd = args.ldd;
}
};
/// Shared memory storage structure
struct SharedStorage {
union {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
} kernel;
    // ProblemVisitor shared storage can't be overlapped with the others: it must stay live
    // across the persistent tile loop, while the mainloop/epilogue storage is reused per tile.
typename ProblemVisitor::SharedStorage problem_visitor;
};
public:
//
// Methods
//
CUTLASS_DEVICE
GemmGroupedSoftmaxMainloopFusion() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
//
// These types shadow the type-level definitions and support the ability to implement
// a 'transposed' GEMM that computes the transposed problems.
//
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
//
// Problem visitor.
//
ProblemVisitor problem_visitor(
params.problem_visitor,
shared_storage.problem_visitor,
blockIdx.x);
// Outer 'persistent' loop to iterate over tiles
while (problem_visitor.next_tile()) {
GemmCoord problem_size = problem_visitor.problem_size();
int32_t problem_idx = problem_visitor.problem_index();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
cutlass::gemm::GemmCoord threadblock_offset(
int(threadblock_idx / grid_shape.n()) * Mma::Shape::kM,
int(threadblock_idx % grid_shape.n()) * Mma::Shape::kN,
0);
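      // threadblock_idx enumerates this problem's tiles in row-major order over the
      // grid_shape.m() x grid_shape.n() tile grid: the quotient by grid_shape.n() is the tile
      // row and the remainder is the tile column, each scaled by the threadblock tile shape.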
// Load element pointers. Exchange pointers and strides if working on the transpose
ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx]));
typename LayoutA::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]);
ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx]));
typename LayoutB::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]);
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_offset.m(),
0,
};
cutlass::MatrixCoord tb_offset_B{
0,
threadblock_offset.n()
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
LayoutA(ldm_A),
ptr_A,
{problem_size.m(), problem_size.k()},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
LayoutB(ldm_B),
ptr_B,
{problem_size.k(), problem_size.n()},
thread_idx,
tb_offset_B);
// Construct iterator to the softmax norm/sum vector
typename Mma::IteratorNormSum iterator_norm_sum(
problem_size.m(),
static_cast<ElementScaleBias const *>(params.ptr_norm[problem_idx]),
static_cast<ElementScaleBias const *>(params.ptr_sum[problem_idx]),
thread_idx,
MatrixCoord(0, threadblock_offset.m())
);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Matrix multiply phase
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx);
      // Compute the number of threadblock-scoped mainloop iterations
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Wait for all threads to finish their epilogue phases from the previous tile.
__syncthreads();
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
iterator_norm_sum,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
ElementC *ptr_C = params.ptr_C[problem_idx];
ElementC *ptr_D = params.ptr_D[problem_idx];
LayoutC layout_C(params.ldc[problem_idx]);
LayoutC layout_D(params.ldd[problem_idx]);
typename Epilogue::OutputTileIterator::Params params_C(layout_C);
typename Epilogue::OutputTileIterator::Params params_D(layout_D);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params_C,
ptr_C,
problem_size.mn(),
thread_idx,
threadblock_offset.mn()
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params_D,
ptr_D,
problem_size.mn(),
thread_idx,
threadblock_offset.mn()
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
// Next tile
problem_visitor.advance(gridDim.x);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,623 | C | 29.575342 | 124 | 0.630097 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/ell_gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a Blocked-ELL sparse GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "cutlass/arch/arch.h"
#include "cutlass/transform/threadblock/ell_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial, ///! If true, code supporting split-K via serial reduction is enabled.
bool IsASparse ///! If true, A is sparse matrix
>
struct EllGemm {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int *semaphore;
int gemm_k_iterations;
int gemm_k_size;
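    // Blocked-ELL description of the sparse operand (summarized from how this kernel consumes
    // it): ell_idx stores, for each row of blocks, the column indices of the retained nonzero
    // blocks; ell_ncol is the padded width of the stored region in elements; ell_blocksize is
    // the block dimension; and ell_base_idx is the base index applied when reading ell_idx.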
const int* ell_idx;
int ell_ncol;
int ell_blocksize;
int ell_base_idx;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
const int* ell_idx,
int ell_ncol,
int ell_blocksize,
int ell_base_idx,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
ell_idx(ell_idx),
ell_ncol(ell_ncol),
ell_blocksize(ell_blocksize),
ell_base_idx(ell_base_idx)
{
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
semaphore = workspace;
}
};
/// Shared memory storage structure
struct SharedStorage {
union{
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
typename cutlass::transform::threadblock::ell::SharedStorage ell;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
EllGemm() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D) {
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kM - 1 ) / Mma::Shape::kM;
int ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block;
int tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
typename Mma::FragmentC accumulators;
accumulators.clear();
// skip computation if matrix is 0
if (params.ell_ncol > 0) {
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
ell_block_offset_m * params.ell_blocksize
+ tile_offset_m * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
int ell_idx_start =
(threadblock_tile_offset.m() / tile_in_ell_block) *
(params.ell_ncol / params.ell_blocksize);
const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]);
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
problem_size_k = min(problem_size_k, params.ell_ncol);
      // Compute the number of threadblock-scoped mainloop iterations
int gemm_k_iterations =
(problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Define coef for ELL index depending on LayoutB
int ell_stride = iterator_B.get_stride();
typename cutlass::transform::threadblock::ell::Iterator ell_iterator(
shared_storage.ell,
ell_idx_ptr,
params.ell_blocksize,
params.ell_base_idx,
Mma::Shape::kK,
problem_size_k,
ell_stride,
thread_idx
);
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
if (!kSplitKSerial || gemm_k_iterations > 0) {
// check if index computations can be skipped
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
        constexpr bool is_double = (sizeof(typename Mma::IteratorA::Element) == 8);
constexpr bool is_multiple_alignment =
(kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1);
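        // (x & (x - 1)) == 0 tests that ell_blocksize is a power of two; together with
        // ell_blocksize >= Mma::Shape::kK this selects the specialized (faster) ELL indexing
        // path below.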
const bool is_specialized_blocksize =
((params.ell_blocksize) & (params.ell_blocksize-1)) == 0
&& params.ell_blocksize >= Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
if ((is_double || is_multiple_alignment) && is_specialized_blocksize) {
mma.operator()<true, true>(
gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
}
else {
mma.operator()<true, false>(
gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
}
}
    } // if (params.ell_ncol > 0)
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block;
tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block;
//assume identity swizzle
MatrixCoord threadblock_offset(
ell_block_offset_m * params.ell_blocksize
+ tile_offset_m * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
//avoid out of bounds
MatrixCoord threadblock_extent(
min(params.problem_size.m(),
ell_block_offset_m * params.ell_blocksize
+ min((tile_offset_m + 1) * Mma::Shape::kM, params.ell_blocksize)),
min(params.problem_size.n(),
(threadblock_tile_offset.n()+1) * Mma::Shape::kN)
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
threadblock_extent,
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
threadblock_extent,
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
// Partial specialization in which B is the sparse (Blocked-ELL) operand; it mirrors the kernel
// above with the roles of the A/B operands and the M/N tile dimensions exchanged.
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct EllGemm<Mma_, Epilogue_, ThreadblockSwizzle_, SplitKSerial, false> {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int *semaphore;
int gemm_k_iterations;
int gemm_k_size;
const int* ell_idx;
int ell_ncol;
int ell_blocksize;
int ell_base_idx;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
const int* ell_idx,
int ell_ncol,
int ell_blocksize,
int ell_base_idx,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
ell_idx(ell_idx),
ell_ncol(ell_ncol),
ell_blocksize(ell_blocksize),
ell_base_idx(ell_base_idx)
{
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
semaphore = workspace;
}
};
/// Shared memory storage structure
struct SharedStorage {
union{
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
typename cutlass::transform::threadblock::ell::SharedStorage ell;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
EllGemm() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D) {
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kN - 1 ) / Mma::Shape::kN;
int ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block;
int tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
typename Mma::FragmentC accumulators;
accumulators.clear();
// skip computation if matrix is 0
if (params.ell_ncol > 0) {
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
ell_block_offset_n * params.ell_blocksize
+ tile_offset_n * Mma::Shape::kN,
};
int ell_idx_start =
(threadblock_tile_offset.n() / tile_in_ell_block) *
(params.ell_ncol / params.ell_blocksize);
const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]);
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
problem_size_k = min(problem_size_k, params.ell_ncol);
      // Compute the number of threadblock-scoped mainloop iterations
int gemm_k_iterations =
(problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Define coef for ELL index depending on LayoutA
int ell_stride = iterator_A.get_stride();
typename cutlass::transform::threadblock::ell::Iterator ell_iterator(
shared_storage.ell,
ell_idx_ptr,
params.ell_blocksize,
params.ell_base_idx,
Mma::Shape::kK,
problem_size_k,
ell_stride,
thread_idx
);
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
if (!kSplitKSerial || gemm_k_iterations > 0) {
// check if index computations can be skipped
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
        constexpr bool is_double = (sizeof(typename Mma::IteratorA::Element) == 8);
constexpr bool is_multiple_alignment =
(kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1);
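        // As in the A-sparse kernel above: a power-of-two ell_blocksize that is at least
        // Mma::Shape::kK enables the specialized (faster) ELL indexing path.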
const bool is_specialized_blocksize =
((params.ell_blocksize) & (params.ell_blocksize-1)) == 0
&& params.ell_blocksize >= Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
if ((is_double || is_multiple_alignment) && is_specialized_blocksize) {
mma.operator()<false, true>(
gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
}
else {
mma.operator()<false, false>(
gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
}
}
    } // if (params.ell_ncol > 0)
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block;
tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block;
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
ell_block_offset_n * params.ell_blocksize
+ tile_offset_n * Mma::Shape::kN
);
//avoid out of bounds
MatrixCoord threadblock_extent(
min(params.problem_size.m(),
(threadblock_tile_offset.m()+1) * Mma::Shape::kM),
min(params.problem_size.n(),
ell_block_offset_n * params.ell_blocksize
+ min((tile_offset_n + 1) * Mma::Shape::kN, params.ell_blocksize))
);
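// The N extent of the epilogue is clamped both to the problem boundary and to the
// right edge of the current ELL block. Illustrative numbers (assumed, not from this
// file): with ell_blocksize = 96 and Mma::Shape::kN = 128, the inner min() gives
// min(128, 96) = 96, so the output tile is masked at the block edge rather than at
// the full threadblock tile width.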
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
threadblock_extent,
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
threadblock_extent,
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 28,916 | C | 33.797834 | 108 | 0.619069 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_rank_2k.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/rank_2k_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric>
struct DefaultRank2K;
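// A minimal usage sketch (illustrative only: the element types, layouts, tile
// shapes, alignments, epilogue, and swizzle below are assumptions, not
// recommendations from this file):
//
// using Rank2KKernel = cutlass::gemm::kernel::DefaultRank2K<
// double, cutlass::layout::ColumnMajor, 1,
// double, cutlass::layout::ColumnMajor, 1,
// double, cutlass::layout::RowMajor, cutlass::FillMode::kLower,
// double, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
// cutlass::gemm::GemmShape<64, 64, 16>,
// cutlass::gemm::GemmShape<32, 32, 16>,
// cutlass::gemm::GemmShape<8, 8, 4>,
// cutlass::epilogue::thread::LinearCombination<double, 1, double, double>,
// cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
// 4, false, cutlass::arch::OpMultiplyAdd>::Rank2Kkernel;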
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultRank2K<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC,layout::RowMajor, FillModeC,
ElementAccumulator, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
/// Define the threadblock-scoped matrix multiply-accumulate (A x BT)
using Mma1 = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA,
kAlignmentA,
ElementB, typename layout::LayoutTranspose<LayoutB>::type,
kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
/// Define the threadblock-scoped matrix multiply-accumulate (B x AT)
using Mma2 = typename cutlass::gemm::threadblock::DefaultMma<
ElementB, LayoutB,
kAlignmentB,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
kAlignmentA,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3<
ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue;
/// Define the kernel-level Rank2K operator.
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultRank2K<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC,layout::RowMajor, FillModeC,
ElementAccumulator, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
/// Define the threadblock-scoped matrix multiply-accumulate (A x BT)
using Mma1 = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA,
kAlignmentA,
ElementB, typename layout::LayoutTranspose<LayoutB>::type,
kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
/// Define the threadblock-scoped matrix multiply-accumulate (B x AT)
using Mma2 = typename cutlass::gemm::threadblock::DefaultMma<
ElementB, LayoutB,
kAlignmentB,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
kAlignmentA,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3<
ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue;
/// Define the kernel-level Rank2K operator.
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 11,560 | C | 39.423077 | 122 | 0.678287 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_with_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Defines a GEMM with Reduction based on an existing UniversalGemm kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/gemm_with_fused_epilogue.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h"
#include "cutlass/epilogue/threadblock/epilogue_with_reduction.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Epilogue reduction operator
typename EpilogueReductionOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
///
typename Enable = void
>
struct DefaultGemmWithReduction {
using GemmBase = typename DefaultGemmUniversal<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_, ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
SharedMemoryClearOption::kClearLastStage
>::GemmKernel;
// Replace epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
typename GemmBase::Epilogue::Shape,
typename GemmBase::Epilogue::WarpMmaOperator,
GemmBase::Epilogue::kPartitionsK,
ElementC_,
EpilogueOutputOp,
EpilogueReductionOp,
GemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Compose the GEMM kernel
using GemmKernel = GemmWithFusedEpilogue<
typename GemmBase::Mma,
Epilogue,
ThreadblockSwizzle
>;
};
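// Composition note: the mainloop (Mma) is taken unchanged from DefaultGemmUniversal;
// only its epilogue is swapped for an EpilogueWithReduction variant, and the pair is
// re-wrapped as a GemmWithFusedEpilogue kernel. The Sm70 specialization below repeats
// the same pattern with the Volta tensor-op epilogue.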
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization: ArchTag = cutlass::arch::Sm70
///
///
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Epilogue reduction operator
typename EpilogueReductionOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
///
typename Enable
>
struct DefaultGemmWithReduction<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_,
ElementAccumulator,
OperatorClass,
cutlass::arch::Sm70,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
EpilogueReductionOp,
ThreadblockSwizzle,
Stages,
Operator,
Enable
> {
using GemmBase = typename DefaultGemmUniversal<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_, ElementAccumulator,
OperatorClass,
cutlass::arch::Sm70,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator
>::GemmKernel;
// Replace epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionVoltaTensorOp<
typename GemmBase::Epilogue::Shape,
typename GemmBase::Epilogue::WarpMmaOperator,
GemmBase::Epilogue::kPartitionsK,
ElementC_,
EpilogueOutputOp,
EpilogueReductionOp,
GemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Compose the GEMM kernel
using GemmKernel = GemmWithFusedEpilogue<
typename GemmBase::Mma,
Epilogue,
ThreadblockSwizzle
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,086 | C | 31.740891 | 102 | 0.681796 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Transpositions for Rank2K problems.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
ComplexTransform TransformA,
int AlignmentA,
typename ElementB_,
typename LayoutB_,
ComplexTransform TransformB,
int AlignmentB,
typename LayoutC_,
FillMode FillModeC_,
bool Transpose
>
struct Rank2KMapArguments {
using ElementA = ElementA_;
using LayoutA = LayoutA_;
static ComplexTransform const kTransformA = TransformA;
static int const kAlignmentA = AlignmentA;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
static ComplexTransform const kTransformB = TransformB;
static int const kAlignmentB = AlignmentB;
using LayoutC = LayoutC_;
static FillMode const kFillModeC = FillModeC_;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
ComplexTransform TransformA,
int AlignmentA,
typename ElementB_,
typename LayoutB_,
ComplexTransform TransformB,
int AlignmentB,
typename LayoutC_,
FillMode FillModeC_
>
struct Rank2KMapArguments<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
LayoutC_,
FillModeC_,
true
> {
using ElementA = ElementB_;
using LayoutA = LayoutB_;
static ComplexTransform const kTransformA = TransformB;
static int const kAlignmentA = AlignmentB;
using ElementB = ElementA_;
using LayoutB = LayoutA_;
static ComplexTransform const kTransformB = TransformA;
static int const kAlignmentB = AlignmentA;
using LayoutC = typename layout::LayoutTranspose<LayoutC_>::type;
static FillMode const kFillModeC = InvertFillMode<FillModeC_>::mode;
};
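// Example of the Transpose = true mapping (assumed argument values): the A/B element
// types, layouts, transforms, and alignments are swapped, LayoutC_ = layout::ColumnMajor
// becomes layout::RowMajor, and FillMode::kLower becomes FillMode::kUpper, so the
// transposed kernel still updates the same triangle of C.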
/////////////////////////////////////////////////////////////////////////////////////////////////
}
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4,334 | C | 32.346154 | 100 | 0.601061 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_2k_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief
    Universal kernel-level rank-2k update (SYR2K / HER2K) combining two threadblock-scoped
    matrix multiply-adds (A x B^T and B x A^T) with the threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T)
typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper)
BlasMode BlasMode_ ///! Blas3 computation mode
>
struct Rank2KUniversal {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma1::IteratorA::Element;
using ElementB = typename Mma1::IteratorB::Element;
// Mma1 (A x B^T)
using LayoutA = typename Mma1::IteratorA::Layout;
using LayoutBT = typename Mma1::IteratorB::Layout;
static ComplexTransform const kMma1TransformA = Mma1::kTransformA;
static ComplexTransform const kMma1TransformB = Mma1::kTransformB;
// Mma2 (B x A^T)
using LayoutB = typename Mma2::IteratorA::Layout;
using LayoutAT = typename Mma2::IteratorB::Layout;
static ComplexTransform const kMma2TransformA = Mma2::kTransformA;
static ComplexTransform const kMma2TransformB = Mma2::kTransformB;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
// Output-related type definitions
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static FillMode const kFillModeC = FillModeC_;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static BlasMode const kBlasMode = BlasMode_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr) { }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) {
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
// Mma1 Iterator A and B params
typename Mma1::IteratorA::Params params_A;
typename Mma1::IteratorB::Params params_BT;
// Mma2 Iterator A and B params
typename Mma2::IteratorA::Params params_B;
typename Mma2::IteratorB::Params params_AT;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A(0),
params_BT(0),
params_B(0),
params_AT(0),
params_C(0),
params_D(0),
batch_count(0),
gemm_k_size(0),
mode(cutlass::gemm::GemmUniversalMode::kGemm),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
batch_stride_A(0),
batch_stride_B(0),
batch_stride_C(0),
batch_stride_D(0),
semaphore(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(args.lda),
params_BT(args.ldb),
params_B(args.ldb),
params_AT(args.lda),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
Rank2KUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Early exit if Fill Mode is Lower and
// if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal)
if (kFillModeC == cutlass::FillMode::kLower &&
(threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) {
return;
}
// Early exit if Fill Mode is Upper and
// if the entire tile is below the main diagonal (top-right corner is at or below the diagonal)
if (kFillModeC == cutlass::FillMode::kUpper &&
threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
return;
}
bool tile_on_diagonal = false;
// Mark tiles that are being crossed by the main diagonal
// (top-right and bottom-left corners are on either side of the diagonal)
if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN
&& threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
tile_on_diagonal = true;
}
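// Illustrative numbers (assumed, not from this file): with Mma1::Shape::kM = kN = 128,
// the tile at (m, n) = (2, 2) covers rows [256, 384) and columns [256, 384), so it is
// crossed by the diagonal and marked for a masked epilogue; the tile at (m, n) = (1, 3)
// lies strictly above the diagonal and, for FillMode::kLower, exits early above.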
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
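// Split-K slicing sketch (assumed values) for kGemm / kGemmSplitKParallel modes: with
// problem k = 1024, four K partitions, and gemm_k_size = 256, the threadblock with
// k() = 1 starts at offset_k = 256 and stops at problem_size_k = 512; the last
// partition keeps the full problem_size.k(), so any remainder is absorbed there.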
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma1::IteratorB iterator_BT(
params.params_BT,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_B(
params.params_B,
ptr_B,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma2::IteratorB iterator_AT(
params.params_AT,
ptr_A,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1 (A x BT)
Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2 (B x AT)
Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
// Compute threadblock-scoped matrix multiply-add (A x BT)
mma1(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_BT,
accumulators);
// The HER2K kernel needs alpha to be complex, and conj(alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// If CTA not on diagonal, FillMode doesn't apply.
FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
__syncthreads();
accumulators.clear();
}
// Compute threadblock-scoped matrix multiply-add (B x AT)
mma2(
gemm_k_iterations,
accumulators,
iterator_B,
iterator_AT,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
/* Needed for HER2K where the second HERK is multiplied by conj(alpha) */
typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1);
EpilogueOutputOp output_op_her2k(second_her2k_params);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
// The HER2K kernel needs alpha to be complex, and conj(alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
ptr_C = static_cast<ElementC *>(params.ptr_D);
}
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
if (kBlasMode == BlasMode::kSymmetric) {
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
} else {
output_op_her2k.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// If CTA not on diagonal, FillMode doesn't apply.
FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
if (kBlasMode == BlasMode::kSymmetric) {
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
} else {
epilogue(
output_op_her2k,
iterator_D,
accumulators,
iterator_C);
}
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 24,162 | C | 30.017972 | 113 | 0.624907 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_splitk_parallel.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for GEMM performing a reduction over K partitions in parallel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmSplitKParallel {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
static int const kAlignmentK = Mma::Operator::Shape::kK;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int64_t splitk_slice_stride;
int gemm_k_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename OutputOp::Params output_op,
int64_t splitk_slice_stride
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
splitk_slice_stride(splitk_slice_stride) {
int full_gemm_k_iterations = problem_size.k() / Mma::Shape::kK;
int gemm_k_iterations = full_gemm_k_iterations / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
}
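// Worked example of the K-slicing above (illustrative numbers): with
// problem_size.k() = 4096, Mma::Shape::kK = 32, and grid_tiled_shape.k() = 8,
// full_gemm_k_iterations = 128, gemm_k_iterations = 16, and gemm_k_size = 512,
// i.e. each of the 8 partitions covers 512 elements of K before the parallel reduction.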
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
GemmSplitKParallel() { }
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k;
if (threadblock_tile_offset.k() + 1 == params.grid_tiled_shape.k()) {
problem_size_k = params.problem_size.k();
}
else {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
int warp_idx = threadIdx.x / 32;
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
iterator_D.add_pointer_offset(params.splitk_slice_stride * threadblock_tile_offset.k());
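// Each K partition writes its partial result to its own slice of the workspace,
// offset by splitk_slice_stride elements; a separate reduction kernel (not part of
// this file) is expected to sum the slices into the final output tensor.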
// Execute the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Run efficient epilogue
epilogue(output_op, iterator_D, accumulators, iterator_D);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 8,142 | C | 31.059055 | 106 | 0.640138 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_ell_gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default kernel-level Blocked-Ell sparse gemm operators.
This operator combines threadblock-scoped ELL MMA
with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
#include "cutlass/gemm/kernel/ell_gemm.h"
#include "cutlass/gemm/threadblock/default_ell_mma.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm;
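//
// Illustrative usage sketch (hypothetical parameter choices, not part of the library): one of
// the partial specializations below is selected by instantiating the primary template with
// concrete types and extracting its GemmKernel, along the lines of
//
//   using Kernel = cutlass::gemm::kernel::DefaultEllGemm<
//       cutlass::half_t, cutlass::layout::RowMajor, 8,      // A: element, layout, alignment
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,   // B: element, layout, alignment
//       cutlass::half_t, cutlass::layout::RowMajor,         // C/D: element, layout
//       float,                                               // accumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,              // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,                // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,                 // instruction shape
//       cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,       // pipeline stages
//       false,   // SplitKSerial
//       cutlass::arch::OpMultiplyAdd,
//       true     // IsASparse
//   >::GemmKernel;
//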
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
  /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator, IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape,
WarpShape,
InstructionShape,
2,
Operator
>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Number of Interleaved k
int InterleavedK,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm<
ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA,
ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC,
layout::ColumnMajorInterleaved<InterleavedK>, int32_t,
arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape,
InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages,
SplitKSerial, Operator, IsASparse> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages, Operator,
true>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of Interleaved k
int InterleavedK,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
kAlignmentA, ElementB,
layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
ElementC, layout::ColumnMajorInterleaved<InterleavedK>,
int32_t, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, true>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Volta architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm70,
ThreadblockShape,
WarpShape,
GemmShape<8, 8, 4>,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm70,
ThreadblockShape,
WarpShape,
GemmShape<8, 8, 4>,
2,
Operator
>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
  /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
layout::RowMajor,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
arch::Sm50,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
2,
Operator>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT targeting Ampere (SM80)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
  /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages
int Stages,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
layout::RowMajor,
ElementAccumulator,
arch::OpClassSimt,
arch::Sm80,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassSimt, arch::Sm80,
ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, Stages,
Operator>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT DP4A
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
  /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Layout type for C matrix operand
typename LayoutC,
/// Element type for C and D matrix operands
typename ElementC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt,
ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>,
EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial,
Operator, IsASparse> {
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using ElementB = int8_t;
using OperatorClass = arch::OpClassSimt;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
arch::OpClassSimt,
arch::Sm50,
ThreadblockShape,
WarpShape,
InstructionShape,
2,
Operator
>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Wmma Gemm Kernel
template <
  /// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
  /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, LayoutC,
ElementAccumulator,
arch::OpClassWmmaTensorOp,
ArchTag,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWmmaTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 29,360 | C | 34.036993 | 100 | 0.678134 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/grouped_problem_visitor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base scheduler for grouped problems
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing the type of scheduling to perform for the ProblemVisitor
enum class GroupScheduleMode {
// Perform all scheduling on device
kDeviceOnly,
// Precompute on the host the full sequence of problems to access
kHostPrecompute
};
/// Visitor class to abstract away the algorithm for iterating over tiles
template <typename ProblemSizeHelper,
typename ThreadblockShape_>
struct BaseGroupedProblemVisitor {
using ThreadblockShape = ThreadblockShape_;
struct ProblemInfo {
static int32_t const kNoPrefetchEntry = -1;
int32_t problem_idx;
int32_t problem_start;
CUTLASS_DEVICE
ProblemInfo() : problem_idx(kNoPrefetchEntry), problem_start(kNoPrefetchEntry) {}
CUTLASS_DEVICE
ProblemInfo(int32_t problem_idx_, int32_t problem_start_) :
problem_idx(problem_idx_), problem_start(problem_start_) {}
};
struct Params {
cutlass::gemm::GemmCoord const *problem_sizes;
int32_t problem_count;
void const *workspace;
int32_t tile_count;
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
Params(): problem_sizes(nullptr), problem_count(0), workspace(nullptr), tile_count(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const *problem_sizes,
int32_t problem_count,
void const *workspace = nullptr,
int32_t tile_count = 0
):
problem_sizes(problem_sizes),
problem_count(problem_count),
workspace(workspace),
tile_count(tile_count)
{}
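    // Illustrative construction (hypothetical variable names): `problem_sizes` must be
    // device-visible for device-side scheduling; `workspace` and `tile_count` are only
    // required by visitors that precompute their schedule on the host, e.g.
    //
    //   Params params(problem_sizes_device, problem_count, schedule_workspace_device, total_tile_count);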
};
Params const ¶ms;
int32_t tile_idx;
int32_t problem_tile_start;
int32_t problem_idx;
//
// Methods
//
CUTLASS_DEVICE
BaseGroupedProblemVisitor(
Params const ¶ms_,
int32_t block_idx
):
params(params_),
tile_idx(block_idx),
problem_tile_start(0),
problem_idx(0)
{}
/// Get the grid shape
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return ProblemSizeHelper::grid_shape(problem);
}
/// Gets the global tile index
CUTLASS_HOST_DEVICE
int32_t tile_index() const {
return tile_idx;
}
/// Gets the index of the problem
CUTLASS_HOST_DEVICE
int32_t problem_index() const {
return problem_idx;
}
CUTLASS_HOST_DEVICE
int32_t threadblock_idx() const {
return tile_idx - problem_tile_start;
}
CUTLASS_DEVICE
void advance(int32_t grid_size) {
tile_idx += grid_size;
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {
ProblemSizeHelper::possibly_transpose_problem(problem);
}
/// Returns the problem size for the current problem
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord problem_size() const {
GemmCoord problem = params.problem_sizes[problem_idx];
ProblemSizeHelper::possibly_transpose_problem(problem);
return problem;
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
return ProblemSizeHelper::tile_count(grid);
}
static int32_t group_tile_count(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count) {
int32_t total_tiles = 0;
for (int32_t i = 0; i < problem_count; ++i) {
auto problem = host_problem_sizes_ptr[i];
possibly_transpose_problem(problem);
auto grid = grid_shape(problem);
total_tiles += tile_count(grid);
}
return total_tiles;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ProblemSizeHelper,
typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount
>
struct GroupedProblemVisitor;
/////////////////////////////////////////////////////////////////////////////////////////////////
// ProblemVisitor that performs all scheduling on device
//
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode::kDeviceOnly,
PrefetchTileCount,
ThreadCount>: public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
static int const kThreadCount = ThreadCount;
static bool const kRequiresPrecomputation = false;
static int const kThreadsPerWarp = 32;
struct SharedStorage {};
// Final tile of the problem loaded by this thread. Each thread will hold
// a separate value.
int32_t problem_ending_tile;
SharedStorage &shared_storage;
//
// Methods
//
CUTLASS_DEVICE
GroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
problem_ending_tile(0),
shared_storage(shared_storage_)
{
this->problem_idx = -1 * kThreadsPerWarp;
this->problem_tile_start = 0;
}
CUTLASS_DEVICE
bool next_tile() {
// Check whether the tile to compute is within the range of the current problem.
int32_t problem_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, this->problem_idx % kThreadsPerWarp);
if (this->tile_idx < problem_tile_end) {
return true;
}
// Check whether the tile to compute is within the current group of problems fetched by the warp.
// The last tile for this group is the final tile of the problem held by the final thread in the warp.
int32_t group_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1);
// Keep the starting problem for this group in `problem_idx`. This is done to reduce
// register pressure. The starting problem for this group is simply the first problem
// in the group most recently fetched by the warp.
int32_t &group_problem_start = this->problem_idx;
group_problem_start = (this->problem_idx / kThreadsPerWarp) * kThreadsPerWarp;
// Keep the starting tile for this group in `problem_tile_start`. This is done to reduce
// register pressure.
int32_t &group_tile_start = this->problem_tile_start;
    // Each thread in the warp processes a separate problem to advance until reaching
    // the group of problems whose tile range contains tile_idx, i.e. the first group
    // whose ending tile is greater than tile_idx.
while (group_tile_end <= this->tile_idx) {
group_problem_start += kThreadsPerWarp;
if (group_problem_start > this->params.problem_count) {
return false;
}
// Since `group_tile_start` is a reference to `this->problem_tile_start`, this
// also sets `this->problem_tile_start`. The fact that `this->problem_tile_start`
// is also set here is used later in `next_tile`.
group_tile_start = group_tile_end;
int lane_idx = threadIdx.x % kThreadsPerWarp;
int32_t lane_problem = group_problem_start + lane_idx;
// Compute the number of tiles in the problem assigned to each thread.
problem_ending_tile = 0;
if (lane_problem < this->params.problem_count) {
cutlass::gemm::GemmCoord problem = this->params.problem_sizes[lane_problem];
this->possibly_transpose_problem(problem);
cutlass::gemm::GemmCoord grid = this->grid_shape(problem);
problem_ending_tile = this->tile_count(grid);
}
// Compute a warp-wide inclusive prefix sum to compute the ending tile index of
// each thread's problem.
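        // For example, if lanes 0..3 of the warp hold problems with 3, 5, 2, and 4 tiles,
        // the inclusive prefix sum leaves their ending tiles (relative to group_tile_start)
        // at 3, 8, 10, and 14 respectively.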
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kThreadsPerWarp; i <<= 1) {
int32_t val = __shfl_up_sync(0xffffffff, problem_ending_tile, i);
if (lane_idx >= i) {
problem_ending_tile += val;
}
}
// The total tile count for this group is now in the final position of the prefix sum
int32_t tiles_in_group = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1);
problem_ending_tile += group_tile_start;
group_tile_end += tiles_in_group;
}
    // The next problem to process is the first one in the group whose ending tile position
    // is greater than the tile index; counting the problems whose ending tile is <= tile_idx
    // gives that problem's index within the group.
int32_t problem_idx_in_group =
__popc(__ballot_sync(0xffffffff, problem_ending_tile <= this->tile_idx));
this->problem_idx = group_problem_start + problem_idx_in_group;
// The starting tile for this problem is the ending tile of the previous problem. In cases
// where `problem_idx_in_group` is the first problem in the group, we do not need to reset
// `problem_tile_start`, because it is set to the previous group's ending tile in the while
// loop above.
if (problem_idx_in_group > 0) {
this->problem_tile_start = __shfl_sync(0xffffffff, problem_ending_tile, problem_idx_in_group - 1);
}
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
return 0;
}
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {}
};
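//
// Illustrative usage sketch (hypothetical surrounding kernel code): a grouped kernel typically
// drives the problem visitor with a persistent-threadblock loop of the following shape, where
// ProblemVisitor is one of the specializations defined in this file.
//
//   ProblemVisitor problem_visitor(params.problem_visitor, shared_storage.problem_visitor, blockIdx.x);
//
//   while (problem_visitor.next_tile()) {
//     cutlass::gemm::GemmCoord problem_size = problem_visitor.problem_size();
//     int32_t problem_idx = problem_visitor.problem_index();
//     int32_t cta_idx = int32_t(problem_visitor.threadblock_idx());
//
//     // ... compute threadblock tile `cta_idx` of problem `problem_idx` ...
//
//     problem_visitor.advance(gridDim.x);
//   }
//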
/////////////////////////////////////////////////////////////////////////////////////////////////
// Precomputes schedule on host and prefetches into shared memory
//
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode::kHostPrecompute,
PrefetchTileCount,
ThreadCount> : public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
static_assert(PrefetchTileCount > 0,
"GroupedProblemVisitor with GroupScheduleMode `kHostPrecompute` currently requires prefetching to shared memory");
using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
using ProblemInfo = typename Base::ProblemInfo;
static bool const kRequiresPrecomputation = true;
static int const kPrefetchTileCount = PrefetchTileCount;
static int const kThreadCount = ThreadCount;
struct SharedStorage {
// Sequence of problem IDs and starting tiles to compute
cutlass::Array<ProblemInfo, kPrefetchTileCount> prefetched_problems;
};
int32_t tiles_computed;
int32_t iterations_per_block;
int32_t block_load_start;
SharedStorage &shared_storage;
ProblemInfo const *problem_info_ptr;
//
// Methods
//
CUTLASS_DEVICE
GroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
tiles_computed(0),
shared_storage(shared_storage_),
problem_info_ptr(reinterpret_cast<ProblemInfo const*>(params_.workspace))
{
iterations_per_block = (params_.tile_count - 1 + gridDim.x) / gridDim.x;
block_load_start = iterations_per_block * block_idx;
// Start prefetching the first set of tiles to compute
prefetch_tiles();
}
CUTLASS_DEVICE
bool next_tile() {
if (this->tile_idx >= this->params.tile_count) {
return false;
}
int32_t prefetch_idx = (tiles_computed % kPrefetchTileCount);
if (prefetch_idx == 0) {
// Ensure all previous stores to shared memory have been completed
__syncthreads();
}
auto problem_info = shared_storage.prefetched_problems[prefetch_idx];
++tiles_computed;
if ((tiles_computed % kPrefetchTileCount) == 0) {
// Begin prefetching next set of tiles. Synchronize first to ensure that
// we don't overwrite the current buffer while someone else is using it.
__syncthreads();
prefetch_tiles();
}
this->problem_idx = problem_info.problem_idx;
this->problem_tile_start = problem_info.problem_start;
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count);
int32_t entries_per_block = ((total_tiles - 1 + block_count) / block_count);
return sizeof(ProblemInfo) * entries_per_block * block_count;
}
#if !defined(__CUDACC_RTC__)
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {
ProblemInfo* host_problem_info_ptr = reinterpret_cast<ProblemInfo*>(host_workspace_ptr);
int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count);
int32_t entries_per_block = (total_tiles - 1 + block_count) / block_count;
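    // The schedule is laid out block-major: global tile t is assigned to block (t % block_count)
    // and stored at slot (t / block_count) within that block's contiguous run of
    // entries_per_block ProblemInfo entries, matching the sequential reads in prefetch_tiles().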
int tile = 0;
int start_tile = 0;
for (int p_idx = 0; p_idx < problem_count; ++p_idx) {
auto problem = host_problem_sizes_ptr[p_idx];
Base::possibly_transpose_problem(problem);
auto grid = Base::grid_shape(problem);
int tiles = Base::tile_count(grid);
ProblemInfo problem_info(p_idx, start_tile);
for (int i = 0; i < tiles; ++i, ++tile) {
host_problem_info_ptr[(entries_per_block * (tile % block_count)) + (tile / block_count)] = problem_info;
}
start_tile += tiles;
}
}
#endif
private:
CUTLASS_DEVICE
void prefetch_tiles() {
// TODO: Consider changing to use async copies from global to shared mem
CUTLASS_PRAGMA_UNROLL
for (int32_t i = 0; i < kPrefetchTileCount; i += kThreadCount) {
int32_t offset = threadIdx.x + i;
if (offset < kPrefetchTileCount && (tiles_computed + offset < iterations_per_block)) {
shared_storage.prefetched_problems[offset] = problem_info_ptr[block_load_start + tiles_computed + offset];
}
}
}
};
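//
// Illustrative host-side sketch (hypothetical variable names): with GroupScheduleMode::kHostPrecompute,
// the caller sizes, fills, and uploads the schedule before launching the kernel, e.g.
//
//   size_t bytes = Visitor::get_workspace_size(problem_sizes_host, problem_count, block_count);
//   std::vector<uint8_t> host_schedule(bytes);
//   Visitor::host_precompute(problem_sizes_host, problem_count, block_count, host_schedule.data());
//   cudaMemcpy(workspace_device, host_schedule.data(), bytes, cudaMemcpyHostToDevice);
//
//   // workspace_device is then passed as Params::workspace, and the total tile count
//   // (Visitor::group_tile_count(...)) as Params::tile_count.
//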
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 16,849 | C | 35.236559 | 130 | 0.634756 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_with_k_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Kernel-level universal GEMM that additionally reduces one operand along the
  GEMM K dimension.
*/
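//
// Illustrative summary (a sketch, not authoritative documentation): in addition to the usual
// epilogue
//
//   D = alpha * (A @ B) + beta * C
//
// the kernel accumulates a vector reduced over the GEMM K dimension, per row of A when
// kReduceKForA is true (reduction[i] = sum_k A[i][k]) and per column of B otherwise
// (reduction[j] = sum_k B[k][j]), and writes it through EpilogueGemmKReduction to
// ptr_gemm_k_reduction.
//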
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
  typename EpilogueGemmKReduction_,      ///! Epilogue applying the GEMM K-dimension reduction
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithKReduction {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using EpilogueGemmKReduction = EpilogueGemmKReduction_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using LayoutGemmKReduction = cutlass::layout::PitchLinear;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
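  // For example, with half_t A and B operands this evaluates to max(128/16, 128/16) = 8 elements,
  // and with an 8-bit operand it becomes 128/8 = 16 elements.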
static int const kReduceKForA = Mma::kReduceKForA;
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
void * ptr_gemm_k_reduction;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_gemm_k_reduction;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction;
//
// Methods
//
Arguments() :
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
ptr_gemm_k_reduction(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void * ptr_gemm_k_reduction,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_gemm_k_reduction,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd,
typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_gemm_k_reduction(ptr_gemm_k_reduction),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_gemm_k_reduction(batch_stride_gemm_k_reduction),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ld_gemm_k_reduction(ld_gemm_k_reduction)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
void * ptr_gemm_k_reduction;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_gemm_k_reduction;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda),
params_B(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_gemm_k_reduction(args.batch_stride_gemm_k_reduction),
ptr_D(args.ptr_D),
ptr_gemm_k_reduction(args.ptr_gemm_k_reduction)
{}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversal::Params::Params() - problem_size: " << this->problem_size);
if (this->mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D = workspace;
ptr_gemm_k_reduction = static_cast<uint8_t *>(workspace)
+ sizeof(ElementC) * size_t(this->batch_stride_D) * size_t(this->grid_tiled_shape.k());
return Status::kSuccess;
}
return ParamsBase::init_workspace(workspace, stream);
}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = ParamsBase::get_workspace_size();
if (this->mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Split-K parallel always requires a temporary workspace
workspace_bytes +=
sizeof(ElementC) *
size_t(batch_stride_gemm_k_reduction) *
size_t(this->grid_tiled_shape.k());
}
return workspace_bytes;
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
ptr_gemm_k_reduction = args.ptr_gemm_k_reduction;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand A");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand B");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand C");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithKReduction op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
typename Mma::FragmentReduction gemm_k_accumulators;
gemm_k_accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators,
gemm_k_accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Assume an identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
ElementC *ptr_gemm_k_reduction = static_cast<ElementC *>(params.ptr_gemm_k_reduction);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
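    // One semaphore slot per output tile: under serial split-K (kGemm with a K-tiled grid),
    // the threadblock handling K partition p waits until the slot holds p and later releases
    // p + 1, or resets the slot to 0 if it handled the last partition (see below).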
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
ptr_gemm_k_reduction += threadblock_tile_offset.k() * params.batch_stride_gemm_k_reduction;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
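    // The K-reduction vector is written only by threadblocks in the first tile column (when
    // reducing A along K) or the first tile row (when reducing B along K), and within those
    // only by warps in the first warp column/row. Under serial split-K, partitions after the
    // first request accumulation into the existing partial result via the final flag below.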
if ((kReduceKForA && threadblock_tile_offset.n() == 0)
|| (!kReduceKForA && threadblock_tile_offset.m() == 0)) {
int warp_idx_mn = warp_idx % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Mma::Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Mma::Base::WarpCount::kM;
if ((kReduceKForA && warp_idx_n == 0)
|| (!kReduceKForA && warp_idx_m == 0)) {
int reduction_warp_idx = kReduceKForA ? warp_idx_m : warp_idx_n;
int reduction_threadblock_offset = kReduceKForA ? threadblock_tile_offset.m() :
threadblock_tile_offset.n();
int reduction_vector_size = kReduceKForA ? params.problem_size.m()
: params.problem_size.n();
EpilogueGemmKReduction epilogue_gemm_k_reduction(thread_idx,
reduction_warp_idx,
lane_idx,
reduction_threadblock_offset,
ptr_gemm_k_reduction);
epilogue_gemm_k_reduction(
reduction_vector_size,
gemm_k_accumulators,
params.mode == GemmUniversalMode::kGemm
&& (params.grid_tiled_shape.k() > 1)
&& (threadblock_tile_offset.k() > 0));
}
}
//
// Release the semaphore
//
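    // With serial split-K (illustration assumes 3 K partitions), partition k waits for the
    // semaphore to reach k and then releases k + 1, so the lock advances 0 -> 1 -> 2 and the
    // final partition resets it to 0 for subsequent launches.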
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 23,629 | C | 32.951149 | 163 | 0.615515 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Problem visitor for grouped Rank2K operations.
This problem visitor is specialized for Rank2K operations, for which matrix C is upper/lower
triangular. Using a problem visitor designed for GEMMs for Rank2K problems is inefficient
because threadblocks will be frequently assigned to tiles that exit early (e.g., due to
being assigned to a tile in the upper-triangular portion of a lower-triangular problem).
This can lead to load imbalance among threadblocks, as the GEMM-based scheduler
assigns all threadblocks to nearly the same number of tiles, regardless of whether
those tiles exit early.
Consider an example of a group of four Rank2Ks with matrix C consisting of a grid of 2x2 tiles.
Consider a grid of 8 threadblocks. The default GEMM scheduler will assign threadblocks to
tiles in the following order:
             Rank2K 0      Rank2K 1      Rank2K 2      Rank2K 3
              0  1          4  5          0  1          4  5
              2  3          6  7          2  3          6  7
Assuming that the problems are lower triangular, blocks 1 and 5 are continuously assigned
to inactive tiles.
This problem visitor aims to assign threadblocks to only those tiles which are in the
upper/lower triangular portion of a given problem. Using the example above, the resulting
assignment would be:
             Rank2K 0      Rank2K 1      Rank2K 2      Rank2K 3
              0  -          3  -          6  -          1  -
              1  2          4  5          7  0          2  3
Achieving the schedule above requires a mapping from threadblock ID to tile coordinates (i, j).
We will illustrate this by mapping on a lower-triangular matrix with a 3x3 grid. We first
    calculate row and column indices assuming one-indexed rows, columns, and threadblock IDs, and
then subtract one to convert to zero-indexed.
Col 1 Col 2 Col 3
----------------------
Row 1 | 1 - -
Row 2 | 2 3 -
Row 3 | 4 5 6
We next outline this mapping, borrowing from: https://stackoverflow.com/a/40954159
Calculating row i given threadblock ID t
----------------------------------------
For a given row i, all threadblock IDs t in that row satisfy the following:
t <= 1 + 2 + 3 + ... + (i-1) + i
The closed-form equation for the right-hand side is: i(i+1)/2.
Using this, we can solve for i given t:
t <= i(i+1)/2
2t <= i^2 + i
2t <= i^2 + i + 0.25 - 0.25
2t + 0.25 <= i^2 + i + 0.25
2t + 0.25 <= (i + 0.5)^2
sqrt(2t + 0.25) - 0.5 <= i
To account for fractional values, we set:
i = ceil(sqrt(2t + 0.25) - 0.5)
To turn this into a zero-indexed row and work with zero-indexed t, we perform:
i = ceil(sqrt(2(t+1) + 0.25) - 0.5) - 1
= ceil(sqrt(2t + 2.25) - 0.5) - 1
Calculating column j given threadblock ID t and row i
-----------------------------------------------------
For a given row i, all threadblock IDs t in that row also satisfy the following:
t > 1 + 2 + 3 + ... + (i-2) + (i-1)
--> t > i(i-1)/2
Threadblock IDs within a given row are sequential, so the one-indexed column ID
for one-indexed threadblock ID t and row i is:
j = t - (i(i-1)/2)
The zero-indexed version becomes:
j = (t+1) - (i(i+1)/2) -1
= t - (i(i+1)/2)
Accounting for non-square grids
-------------------------------
    Though the overall output problem size for Rank2K problems is guaranteed to be square, the
grids used in computing may not be square due to using non-square threadblock shapes. For
example, a threadblock shape of 64x32 operating on a problem of output size 128x128 would
result in a grid of 2x4 tiles.
This case can be handled by noting that the output resembles a square grid of 2x2 "macro tiles"
each of which contains 2 "true tiles." We can thus first map a threadblock ID to its "macro tile"
using the equations above, and then map it to the "true tile" within its "macro tile." In the example
    of a 2x4 grid, this mapping would look as follows:
             "Macro grid"        "True grid"
              {0, 1}   -          0  1  -  -
              {2, 3} {4, 5}       2  3  4  5
A zero-indexed threadblock ID t is mapped to its "macro tile ID" t_macro as:
t_macro = t // r
Where r is the ratio of the maximum dimension of the grid to the minimum dimension of the grid
(i.e., r = 4 / 2 = 2 in the previous example).
One uses t_macro and the calculations above to find the row and column in the square matrix to
obtain i_macro and j_macro (zero-indexed). The mapping from (i_macro, j_macro) --> (i, j)
is simply the following:
if (ThreadblockShape::M > ThreadblockShape::N):
r = ThreadblockShape::M / ThreadblockShape::N
i = i_macro
j = (j_macro * r) + (t % r)
elif (ThreadblockShape::M < ThreadblockShape::N):
r = ThreadblockShape::N / ThreadblockShape::M
i = (i_macro * r) + (t % r)
j = j_macro
else:
i = i_macro
j = j_macro
    Handling cases with grid dimensions that aren't multiples of each other
----------------------------------------------------------------------
Even though threadblock shapes M and N are typically multiples of one another, the grid
for a given problem may not have dimensions of the same ratio as that of the threadblock.
For example, a problem of size 132x132 using a threadblock of shape 64x32 will result
in a grid of 3x5 tiles. In this case, there is not an integer number of "true tiles"
per "macro tile."
When this scenario arises, we simply pad the larger dimension of the grid such that
there are an integer number of "true tiles" per "macro tile." Thus, the 3x5 grid in
the example above will be treated as a 3x6 grid. Row and column positions for each
tile are calculated as above. Any threadblocks that map to tiles that are outside the
problem range or upper/lower triangular portion (e.g., (2, 5)) will exit early from
this problem and may proceed to the next problem in the group.
Handling upper-triangular matrices
----------------------------------
The only modification needed for upper-triangular matrices is to swap i_macro and j_macro
in the calculations above.
*/
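//
// The standalone sketch below is an illustrative aid only (it is compiled out and is not part
// of CUTLASS); it evaluates the zero-indexed lower-triangular mapping derived above, first for
// a square 3x3 grid and then for the 2x4 "macro tile" example. The helper names and the example
// grids are assumptions made purely for illustration.
//
#if 0
#include <cmath>
#include <cstdio>

// i = ceil(sqrt(2t + 2.25) - 0.5) - 1 and j = t - i(i+1)/2 (zero-indexed).
inline void map_lower_triangular(int t, int &row, int &col) {
  row = static_cast<int>(std::ceil(std::sqrt(2.0 * t + 2.25) - 0.5)) - 1;
  col = t - (row * (row + 1)) / 2;
}

int main() {
  // A 3x3 lower-triangular grid has 6 active tiles; IDs 0..5 map to
  // (0,0), (1,0), (1,1), (2,0), (2,1), (2,2).
  for (int t = 0; t < 6; ++t) {
    int row, col;
    map_lower_triangular(t, row, col);
    std::printf("square grid: t=%d -> (%d, %d)\n", t, row, col);
  }

  // The 2x4 grid example (threadblock shape M > N, ratio r = 2): map t to a macro tile,
  // then expand the column. Expected: 0->(0,0), 1->(0,1), 2->(1,0), 3->(1,1), 4->(1,2), 5->(1,3).
  int const r = 2;
  for (int t = 0; t < 6; ++t) {
    int i_macro, j_macro;
    map_lower_triangular(t / r, i_macro, j_macro);
    int i = i_macro;
    int j = j_macro * r + (t % r);
    std::printf("2x4 grid: t=%d -> (%d, %d)\n", t, i, j);
  }
  return 0;
}
#endif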
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Helpers for calculating offsets for Rank2K problem visitor. These helpers specifically pertain
// to the conversion from "macro tiles" to "true tiles" in the description above.
//
template <
typename ThreadblockShape,
typename Enable = void
>
struct Rank2KGroupedProblemVisitorOffsetHelper;
// Partial specialization for the case where threadblock shape M > threadblock shape N
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM > ThreadblockShape::kN) >::type
> {
static_assert(ThreadblockShape::kM % ThreadblockShape::kN == 0,
"Rank2KGroupedProblemVisitor with threadblock shape M > threadblock shape N "
"requires that threadblock shape M be a multiple of threadblock shape N.");
static int32_t const kThreadblockSkewRatio = ThreadblockShape::kM / ThreadblockShape::kN;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.m();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return row;
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return (col * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio);
}
};
// Partial specialization for the case where threadblock shape M < threadblock shape N
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM < ThreadblockShape::kN) >::type
> {
static_assert(ThreadblockShape::kN % ThreadblockShape::kM == 0,
"Rank2KGroupedProblemVisitor with threadblock shape M < threadblock shape N "
"requires that threadblock shape N be a multiple of threadblock shape M.");
static int32_t const kThreadblockSkewRatio = ThreadblockShape::kN / ThreadblockShape::kM;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.n();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return (row * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio);
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return col;
}
};
// Partial specialization for the case where threadblock shape M == threadblock shape N
// In this case, macro tiles are equivalent to true tiles, so the conversions are
// identity functions.
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM == ThreadblockShape::kN) >::type
> {
static int32_t const kThreadblockSkewRatio = 1;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.m();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return row;
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return col;
}
};
// Helper for correctly representing problem sizes in grouped kernels
template <typename ThreadblockShape>
struct Rank2KGroupedProblemSizeHelper {
using OffsetHelper = Rank2KGroupedProblemVisitorOffsetHelper<ThreadblockShape>;
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return cutlass::gemm::GemmCoord(
((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM),
((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN),
1);
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
// Return the number of tiles at or below the diagonal (or at and above
// for mode kUpper). We do this by first calculating this value assuming
// we have a square matrix of tiles of size `dim x dim` where `dim` is the
// minimum among {grid.m(), grid.n()}. We then multiply the resulting value
// by OffsetHelper::kThreadblockSkewRatio to account for cases in which there
// are more tiles in one dimension than the other.
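    // For example (numbers taken from the discussion above): a 64x32 threadblock tile on
    // a 132x132 lower-triangular problem gives a padded 3x6 grid with a skew ratio of 2,
    // so dim = 3 and tile_count = (3 + 3) * 2 = 12, compared with the 15 tiles of the
    // full 3x5 grid that a dense GEMM scheduler would visit.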
int32_t dim = OffsetHelper::min_dim(grid);
int32_t tiles_on_diagonal = dim;
int32_t tiles_below_diagonal = ((dim * (dim - 1)) / 2);
return (tiles_on_diagonal + tiles_below_diagonal) * OffsetHelper::kThreadblockSkewRatio;
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Default problem visitor for fill modes kUpper and kLower.
//
template <typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount,
cutlass::FillMode FillModeC>
struct Rank2KGroupedProblemVisitor : public GroupedProblemVisitor<
detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount> {
static cutlass::FillMode const kFillModeC = FillModeC;
static_assert(kFillModeC == cutlass::FillMode::kLower || kFillModeC == cutlass::FillMode::kUpper,
"Default Rank2KGroupedProblemVisitor requires fill mode of kLower or kUpper.");
using ProblemSizeHelper = detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>;
using Base = GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount>;
using OffsetHelper = typename ProblemSizeHelper::OffsetHelper;
using Params = typename Base::Params;
using SharedStorage = typename Base::SharedStorage;
//
// Methods
//
CUTLASS_DEVICE
Rank2KGroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, shared_storage_, block_idx)
{}
CUTLASS_DEVICE
cutlass::gemm::GemmCoord threadblock_offset(int32_t threadblock_id) const {
int32_t macro_id = threadblock_id / OffsetHelper::kThreadblockSkewRatio;
int32_t macro_row = ceil(cutlass::fast_sqrt((2*macro_id) + 2.25) - 0.5) - 1;
int32_t macro_col = macro_id - (((macro_row+1) * macro_row)/2);
if (kFillModeC == cutlass::FillMode::kUpper) {
swap(macro_row, macro_col);
}
int32_t row = OffsetHelper::macro_row_to_row(macro_row, threadblock_id);
int32_t col = OffsetHelper::macro_col_to_col(macro_col, threadblock_id);
return cutlass::gemm::GemmCoord(row, col, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 16,100 | C | 41.708223 | 105 | 0.632733 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/params_universal_base.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base functionality for common types of universal GEMM kernel parameters
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Argument structure
struct UniversalArgumentsBase
{
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
int64_t batch_stride_D;
//
// Methods
//
UniversalArgumentsBase() :
mode(GemmUniversalMode::kGemm),
batch_count(1),
batch_stride_D(0)
{}
/// constructs an arguments structure
UniversalArgumentsBase(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
int64_t batch_stride_D)
:
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
batch_stride_D(batch_stride_D)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
};
/// Parameters structure
template <
typename ThreadblockSwizzle,
typename ThreadblockShape,
typename ElementA,
typename ElementB,
typename ElementC>
struct UniversalParamsBase
{
//
// Data members
//
GemmCoord problem_size;
GemmCoord grid_tiled_shape;
int swizzle_log_tile;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
int64_t batch_stride_D;
int *semaphore;
//
// Host dispatch API
//
/// Default constructor
UniversalParamsBase() = default;
/// Constructor
UniversalParamsBase(
UniversalArgumentsBase const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
problem_size(args.problem_size),
mode(args.mode),
batch_count(args.batch_count),
batch_stride_D(args.batch_stride_D),
semaphore(nullptr)
{
ThreadblockSwizzle swizzle;
// Get GEMM volume in thread block tiles
grid_tiled_shape = swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
swizzle_log_tile = swizzle.get_log_tile(grid_tiled_shape);
// Determine extent of K-dimension assigned to each block
gemm_k_size = args.problem_size.k();
if (args.mode == GemmUniversalMode::kGemm || args.mode == GemmUniversalMode::kGemmSplitKParallel)
{
int const kAlignK = const_max(const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value), 1);
gemm_k_size = round_up(ceil_div(args.problem_size.k(), args.batch_count), kAlignK);
if (gemm_k_size) {
grid_tiled_shape.k() = ceil_div(args.problem_size.k(), gemm_k_size);
}
}
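    // Worked example (values assumed for illustration): for 16-bit A/B, kAlignK = 128 / 16 = 8;
    // with problem_size.k() = 1024 and batch_count = 3 K-splits, gemm_k_size =
    // round_up(ceil_div(1024, 3), 8) = round_up(342, 8) = 344 and grid_tiled_shape.k() =
    // ceil_div(1024, 344) = 3.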
}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = 0;
if (mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Split-K parallel always requires a temporary workspace
workspace_bytes =
sizeof(ElementC) *
size_t(batch_stride_D) *
size_t(grid_tiled_shape.k());
}
else if (mode == GemmUniversalMode::kGemm && grid_tiled_shape.k() > 1)
{
// Serial split-K only requires a temporary workspace if the number of partitions along the
// GEMM K dimension is greater than one.
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
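    // For instance (sizes assumed for illustration): serial split-K over a 4x8 grid of output
    // tiles needs only 4 * 8 * sizeof(int) = 128 bytes of lock storage, whereas parallel
    // split-K stores a full ElementC partial-result tensor for each of the K partitions.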
return workspace_bytes;
}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
semaphore = static_cast<int *>(workspace);
// Zero-initialize entire workspace
if (semaphore)
{
size_t workspace_bytes = get_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << workspace_bytes << " workspace bytes");
cudaError_t result = cudaMemsetAsync(
semaphore,
0,
workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Returns the GEMM volume in thread block tiles
GemmCoord get_tiled_shape() const
{
return grid_tiled_shape;
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return ThreadblockSwizzle().get_grid_shape(grid_tiled_shape);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,148 | C | 28.060975 | 122 | 0.629687 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_rank_2k_grouped.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level grouped Rank2K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h"
#include "cutlass/gemm/kernel/default_rank_2k.h"
#include "cutlass/gemm/kernel/default_rank_2k_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
///
typename Enable = void
>
struct DefaultRank2KGrouped;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued grouped Rank2K
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_
>
struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
ElementB, LayoutB, TransformB, kAlignmentB,
ElementC, LayoutC,
FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::Rank2KMapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
FillModeC,
kInternalTranspose
>;
// Define the default grouped Rank2K kernel
using DefaultRank2Kkernel = typename kernel::DefaultRank2K<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
MapArguments::kAlignmentA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
MapArguments::kAlignmentB,
ElementC,
typename MapArguments::LayoutC,
MapArguments::kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
false, // SplitKSerial
Operator,
BlasMode_
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
using Rank2Kkernel = kernel::Rank2KGrouped<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
TransformA,
TransformB,
DefaultRank2Kkernel::kFillModeC,
DefaultRank2Kkernel::kBlasMode,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued grouped Rank2K
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_
>
struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
ElementB, LayoutB, TransformB, kAlignmentB,
ElementC, LayoutC,
FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::Rank2KMapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
FillModeC,
kInternalTranspose
>;
// Define the default grouped Rank2K kernel
using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
ElementC,
typename MapArguments::LayoutC,
MapArguments::kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MapArguments::kTransformA,
MapArguments::kTransformB,
Operator,
false, // SplitKSerial
BlasMode_
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
/// Pass through the user-provided TransformA and TransformB so as to
/// correctly set public-facing TransformA and TransformB in kernel::Rank2KGrouped.
/// This is needed because kernel::DefaultRank2KComplex may change TransformA and
/// TransformB that become template arguments to Mma1 and Mma2.
using Rank2Kkernel = kernel::Rank2KGrouped<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
TransformA,
TransformB,
DefaultRank2Kkernel::kFillModeC,
DefaultRank2Kkernel::kBlasMode,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 12,470 | C | 34.030899 | 100 | 0.673857 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined GEMM kernel. Does not compute batching; split-K is
           supported only via serial reduction when the SplitKSerial template parameter is enabled.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "cutlass/arch/arch.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct Gemm {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int *semaphore;
int gemm_k_size;
// For gather+scatter operations
int const *gather_A_indices;
int const *gather_B_indices;
int const *scatter_D_indices;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), semaphore(0), gemm_k_size(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr,
int const *gather_A_indices = nullptr,
int const *gather_B_indices = nullptr,
int const *scatter_D_indices = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
gather_A_indices(gather_A_indices),
gather_B_indices(gather_B_indices),
scatter_D_indices(scatter_D_indices) {
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
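      // For example (shapes assumed for illustration): with Mma::Shape::kK = 32,
      // problem_size.k() = 260 and grid_tiled_shape.k() = 2, there are 9 K iterations in
      // total and 5 per split, so gemm_k_size = 160; the second split covers the remaining
      // k range [160, 260).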
semaphore = workspace;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
Gemm() { }
/// Determines whether kernel satisfies alignment
CUTLASS_HOST_DEVICE
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D) {
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<typename Epilogue::OutputTileIterator::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Epilogue::OutputTileIterator::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.gather_B_indices);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (!kSplitKSerial || gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
}
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.scatter_D_indices
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.scatter_D_indices
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 13,381 | C | 34.123359 | 108 | 0.615649 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/trmm_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/core_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight)
FillMode FillMode_, ///! Fill Mode for triangular matrix (kLower or kUpper)
DiagType DiagType_ ///! Diag Type for triangular matrix (kNonUnit or kUnit)
>
struct TrmmUniversal {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static SideMode const kSideMode = SideMode_;
static FillMode const kFillMode = FillMode_;
static DiagType const kDiagType = DiagType_;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
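  // e.g., 8 elements when A and B are 16-bit types, or 4 elements when both are 32-bit
  // (widths assumed for illustration).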
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_D;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldd;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_B(nullptr), ptr_D(nullptr) { }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldd(ldd) {
}
/// Returns arguments for the transposed problem sizes
Arguments transposed_problem_size() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
return args;
}
/// Returns arguments for the transposed matrices
Arguments swapped_matrices() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A(0),
params_B(0),
params_D(0),
batch_count(0),
gemm_k_size(0),
mode(cutlass::gemm::GemmUniversalMode::kGemm),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_D(nullptr),
batch_stride_A(0),
batch_stride_B(0),
batch_stride_D(0),
semaphore(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(args.lda),
params_B(args.ldb),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
TrmmUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
/******************************************************************************************************
      First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transposes of each other.
      - (Left Side, Lower Fill): calculate the bottom of the CTA tile, then find the k-iterations
        needed to process all elements up to that coordinate.
      - (Right Side, Upper Fill): calculate the right end of the CTA tile, then find the k-iterations
        needed to process all elements up to that coordinate.
      Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transposes of each other.
      - (Left Side, Upper Fill): calculate the top of the CTA tile, then find the k-iterations
        that can be skipped for all elements of this tile.
      - (Right Side, Lower Fill): calculate the left start of the CTA tile, then find the k-iterations
        that can be skipped for all elements of this tile.
********************************************************************************************************/
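    // Worked example (illustrative shapes, not from the original source): with
    // Mma::Shape::kM = 128, kK = 32 and problem_size.k() = 1024, a dense tile would run
    // gemm_k_iterations = 32. For (Left, Lower), the CTA at tile row m = 2 covers rows
    // 256..383, so only k_iterations_till_diagonal = (3 * 128 + 31) / 32 = 12 iterations
    // touch non-zero data and the loop count is clamped to 12. For (Left, Upper) at the
    // same tile row, the first (2 * 128) / 32 = 8 iterations lie entirely in the zero
    // fill, so the A/B offsets are advanced by 8 * kK = 256 and 32 - 8 = 24 iterations remain.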
if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kLower) {
int k_iterations_till_diagonal = ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM + Mma::Shape::kK - 1) / Mma::Shape::kK;
if (k_iterations_till_diagonal < gemm_k_iterations) {
gemm_k_iterations = k_iterations_till_diagonal;
}
} else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kUpper) {
int k_iterations_till_diagonal = ((threadblock_tile_offset.n() + 1) * Mma::Shape::kN + Mma::Shape::kK - 1) / Mma::Shape::kK;
if (k_iterations_till_diagonal < gemm_k_iterations) {
gemm_k_iterations = k_iterations_till_diagonal;
}
} else if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kUpper) {
int k_iterations_till_diagonal = ((threadblock_tile_offset.m()) * Mma::Shape::kM) / Mma::Shape::kK;
if (k_iterations_till_diagonal != 0) {
tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK});
tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0});
gemm_k_iterations -= k_iterations_till_diagonal;
}
} else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kLower) {
int k_iterations_till_diagonal = ((threadblock_tile_offset.n()) * Mma::Shape::kN) / Mma::Shape::kK;
if (k_iterations_till_diagonal != 0) {
tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK});
tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0});
gemm_k_iterations -= k_iterations_till_diagonal;
}
}
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor (although irrelevant to this kernel as beta is zero).
typename Epilogue::OutputTileIterator iterator_C(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
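// Serial split-K walkthrough for the kernel above (illustrative): with
// grid_tiled_shape.k() == 3, the block owning k-partition 0 waits for the semaphore
// to read 0 (immediately true), writes its partial result to D and releases the
// value 1; partition 1 waits for 1, accumulates on top of D (iterator_C aliases
// iterator_D when k() is nonzero) and releases 2; partition 2 waits for 2, produces
// the final tile and resets the semaphore to 0 so the flag is ready for a
// subsequent launch.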
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemv.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Kernel computing batched matrix-vector products (GEMV) with a column-major A operand.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename ElementC_,
typename ElementAccumulator_,
typename EpilogueOutputOp_
>
struct Gemv {
public:
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using TensorRefA = TensorRef<ElementA, LayoutA>;
static_assert(platform::is_same<LayoutA, LayoutA_>::value,
"Only supported for column-major A matrix");
using ElementB = ElementB_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using EpilogueOutputOp = EpilogueOutputOp_;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static int const kThreadCount = 32;
static int const kStages = 1;
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
static int const kAlignmentC = 1;
//
// Structures
//
/// Argument structure
struct Arguments {
MatrixCoord problem_size;
int32_t batch_count;
typename EpilogueOutputOp::Params output_op;
TensorRefA ref_A;
ElementB const *ptr_B;
ElementC const *ptr_C;
ElementC *ptr_D;
int64_t inc_B;
int64_t inc_C;
int64_t inc_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
//
// Methods
//
Arguments(): batch_count(0) { }
Arguments(
MatrixCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t inc_B,
int64_t inc_C,
int64_t inc_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D
):
problem_size(problem_size),
batch_count(batch_count),
output_op(output_op),
ref_A(ref_A),
ptr_B(static_cast<ElementB const *>(ptr_B)),
ptr_C(static_cast<ElementC const *>(ptr_C)),
ptr_D(static_cast<ElementC *>(ptr_D)),
inc_B(inc_B),
inc_C(inc_C),
inc_D(inc_D),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D)
{ }
Arguments(
MatrixCoord problem_size,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t inc_B,
int64_t inc_C,
int64_t inc_D
):
Arguments(
problem_size,
1,
output_op,
ref_A,
ptr_B,
ptr_C,
ptr_D,
inc_B,
inc_C,
inc_D,
1,
1,
1,
1)
{ }
Status update(Arguments const &args) {
output_op = args.output_op;
      ref_A = args.ref_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
return Status::kSuccess;
}
};
using Params = Arguments;
/// Shared memory storage structure
union SharedStorage {
};
public:
//
// Methods
//
CUTLASS_DEVICE
Gemv() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::MatrixCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Loop over batch indices
for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) {
int i = blockIdx.x * kThreadCount + threadIdx.x;
ElementA const *ptr_A = params.ref_A.data() + i;
ElementB const *ptr_B = params.ptr_B;
ptr_A += batch_idx * params.batch_stride_A;
ptr_B += batch_idx * params.batch_stride_B;
ElementAccumulator accum = ElementAccumulator();
// Compute inner product
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < params.problem_size.column(); ++k) {
// Fetch from A
ElementA a = ElementA();
if (i < params.problem_size.row()) {
a = *ptr_A;
}
ptr_A += params.ref_A.stride(0);
// Fetch from B
ElementB b = *ptr_B;
ptr_B += params.inc_B;
// Math
accum += ElementAccumulator(a) * ElementAccumulator(b);
}
//
// Epilogue phase
//
ElementC const *ptr_C = params.ptr_C + i * params.inc_C + batch_idx * params.batch_stride_C;
ElementC *ptr_D = params.ptr_D + i * params.inc_D + batch_idx * params.batch_stride_D;
EpilogueOutputOp output_op(params.output_op);
typename EpilogueOutputOp::FragmentAccumulator accum_fragment;
typename EpilogueOutputOp::FragmentOutput source_fragment;
typename EpilogueOutputOp::FragmentOutput output_fragment;
accum_fragment[0] = accum;
if (i < params.problem_size.row()) {
if (output_op.is_source_needed()) {
source_fragment[0] = *ptr_C;
output_fragment = output_op(accum_fragment, source_fragment);
}
else {
output_fragment = output_op(accum_fragment);
}
*ptr_D = output_fragment[0];
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
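
// ---------------------------------------------------------------------------
// Illustrative usage sketch -- not part of the original CUTLASS source. It
// relies only on the kernel interface defined above plus the standard
// LinearCombination epilogue; the launch configuration and the `example`
// namespace are assumptions made for this sketch.
// ---------------------------------------------------------------------------
#include <cuda_runtime_api.h>
#include "cutlass/epilogue/thread/linear_combination.h"

namespace example {

// Single-element linear-combination epilogue: D = alpha * accum + beta * C
using GemvEpilogue = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;

using GemvKernel = cutlass::gemm::kernel::Gemv<
    float,                          // ElementA
    cutlass::layout::ColumnMajor,   // LayoutA (the kernel asserts column-major)
    float,                          // ElementB
    float,                          // ElementC
    float,                          // ElementAccumulator
    GemvEpilogue>;

// Minimal __global__ wrapper around the kernel's operator().
__global__ void gemv_kernel(GemvKernel::Params params) {
  __shared__ GemvKernel::SharedStorage shared_storage;
  GemvKernel gemv_op;
  gemv_op(params, shared_storage);
}

// Host-side launch of y = alpha * A * x + beta * y for an m-by-n column-major A.
inline cudaError_t launch_gemv(int m, int n, float alpha, float beta,
                               float const *A, int lda, float const *x, float *y) {
  GemvKernel::TensorRefA ref_A(const_cast<float *>(A), cutlass::layout::ColumnMajor(lda));

  GemvKernel::Arguments args(
      {m, n},                            // problem_size: rows x columns of A
      GemvEpilogue::Params(alpha, beta), // epilogue scalars
      ref_A,
      x,                                 // ptr_B: the input vector
      y,                                 // ptr_C: the source vector
      y,                                 // ptr_D: the destination vector
      1, 1, 1);                          // unit strides between vector elements

  // One thread per row of A; kThreadCount (32) threads per block.
  dim3 block(GemvKernel::kThreadCount);
  dim3 grid((m + GemvKernel::kThreadCount - 1) / GemvKernel::kThreadCount, 1, 1);

  gemv_kernel<<<grid, block>>>(GemvKernel::Params(args));
  return cudaGetLastError();
}

} // namespace example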
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemv_batched_strided.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail
{
template<typename ElementAlphaBeta, bool BetaIsZero>
struct GemvBatchedStridedEpilogueScaling
{
ElementAlphaBeta const & alpha;
ElementAlphaBeta const & beta;
CUTLASS_DEVICE
GemvBatchedStridedEpilogueScaling(ElementAlphaBeta& alpha_, ElementAlphaBeta& beta_) :
alpha(alpha_), beta(beta_)
{ }
template<typename FragmentCD, typename FragmentAccumulator>
CUTLASS_DEVICE
void operator()(FragmentAccumulator& accumulators,
FragmentCD const& fragment_C,
FragmentCD& fragment_D) const
{
using AccType = typename FragmentAccumulator::value_type;
using CDType = typename FragmentCD::value_type;
    static_assert(FragmentCD::kElements == FragmentAccumulator::kElements,
                  "Mismatch in fragment sizes.");
for (int i = 0; i < FragmentCD::kElements; ++i)
{
if (BetaIsZero)
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha));
}
else
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha)
+ AccType(fragment_C[i]) * AccType(beta));
}
}
}
};
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero=false>
CUTLASS_DEVICE void GemvBatchedStridedDevice(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv;
using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle;
using EpilogueScale = detail::GemvBatchedStridedEpilogueScaling<ElementAlphaBeta, BetaIsZero>;
ThreadBlockSwizzle swizzler;
// Compute initial location in logical coordinates
BatchedGemmCoord tb_offset = swizzler.get_tile_offset();
int const batch_idx = swizzler.get_batch_idx();
// Offset to the batch
ref_A.add_pointer_offset(batch_idx*lda);
ref_B.add_pointer_offset(batch_idx*ldb);
// Construct iterators to A and B operands
typename GemvKernel::IteratorA::Params params_A(ref_A.layout());
typename GemvKernel::IteratorA iterator_A(
params_A,
ref_A.data(),
{ 1, problem_size.k() },
0,
{ 0, 0 });
typename GemvKernel::IteratorB::Params params_B(ref_B.layout());
typename GemvKernel::IteratorB iterator_B(
params_B,
ref_B.data(),
{ problem_size.k(), problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
//
// Main loop
//
// Construct thread-scoped matrix multiply
ThreadBlockGemv mma;
typename ThreadBlockGemv::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped gemv
mma(problem_size.mnk(), accumulators, iterator_A, iterator_B, accumulators);
//
  // Epilogue (TODO: make the epilogue a template argument)
//
typename GemvKernel::FragmentCD fragment_CD;
// Load C (skip if beta is zero)
if (!BetaIsZero)
{
tb_offset = swizzler.get_tile_offset();
ref_C.add_pointer_offset(batch_idx*ldc);
typename GemvKernel::IteratorCD::Params params_C(ref_C.layout());
typename GemvKernel::IteratorCD iterator_C(
params_C,
ref_C.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_C.load(fragment_CD);
}
// Apply alpha/beta scaling
EpilogueScale epilogue_scale(alpha, beta);
epilogue_scale(accumulators, fragment_CD, fragment_CD);
// Store D
tb_offset = swizzler.get_tile_offset();
ref_D.add_pointer_offset(batch_idx*ldd);
typename GemvKernel::IteratorCD::Params params_D(ref_D.layout());
typename GemvKernel::IteratorCD iterator_D(
params_D,
ref_D.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_D.store(fragment_CD);
}
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero>
__global__ void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, BetaIsZero>(
problem_size, alpha, beta, ref_A, lda, ref_B, ldb, ref_C, ldc, ref_D, ldd
);
}
template <typename GemvKernel, typename ElementAlphaBeta>
__global__ void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, alpha, ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
template <typename GemvKernel>
__global__ void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ElementAlphaBeta = typename GemvKernel::IteratorCD::Element;
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, ElementAlphaBeta(1), ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
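
// The three overloads above differ only in their scaling (illustrative summary of the
// per-batch output row of length problem_size.n()):
//  - (alpha, beta) : D = alpha * (A x B) + beta * C
//  - (alpha)       : D = alpha * (A x B), and C is never read (BetaIsZero = true)
//  - ()            : D = A x B, with alpha = 1 and beta = 0
//
// A launch might look like the following (illustrative only -- the grid/block geometry
// must match GemvKernel's ThreadBlockSwizzle and threadblock shape):
//
//   GemvBatchedStrided<GemvKernel, float><<<grid, block, 0, stream>>>(
//       problem_size, alpha, ref_A, lda, ref_B, ldb, ref_D, ldd);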
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemv.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/gemm/threadblock/gemv.h"
#include "cutlass/gemm/threadblock/default_gemv_core.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the ThreadBlock tile - concept: gemm::GemmShape<>
typename ThreadBlockShape_,
/// Size of the per-thread shape - concept: gemm::GemmShape<>
typename ThreadShape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C/D matrix
typename ElementCD_,
/// Layout of C/D matrix (concept: MatrixLayout)
typename LayoutCD_,
/// Data type of the accumulator
typename ElementAccumulator_ = ElementCD_>
struct DefaultGemv {
/// Shape of Threadblock-level matrix operation (concept: GemmShape)
using ThreadBlockShape = ThreadBlockShape_;
/// Shape of warp-level matrix operation (concept: GemmShape)
using ThreadShape = ThreadShape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulators
using ElementAccumulator = ElementAccumulator_;
/// Data type of accumulators (same as C/D)
using LayoutAccumulator = LayoutCD_;
/// Data type of input/output matrix C/D
using ElementCD = ElementCD_;
/// Layout of input/output matrix C/D
using LayoutCD = LayoutCD_;
// Define the core components
using Core = typename cutlass::gemm::threadblock::DefaultGemvCore<
ThreadBlockShape, ThreadShape, ElementA, LayoutA, ElementB, LayoutB,
ElementAccumulator, LayoutAccumulator>;
// Define the threadblock-scoped gemv
using ThreadBlockGemv = cutlass::gemm::threadblock::Gemv<Core>;
// Iterator for multiplicand A
using IteratorA = typename ThreadBlockGemv::IteratorA;
// Iterator for multiplicand B
using IteratorB = typename ThreadBlockGemv::IteratorB;
/// Policy for the iterator that reads/writes C/D
using IteratorPolicyCD = typename platform::conditional<
platform::is_same<LayoutCD, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<ThreadBlockShape::kN, ThreadBlockShape::kM>, Core::kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, Core::kThreadsPerN, ThreadShape::kM>>::type;
/// Iterator that reads/writes C/D
using IteratorCD = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, ElementCD, LayoutCD, 0, IteratorPolicyCD>;
/// Fragment storage for C/D
using FragmentCD = typename IteratorCD::Fragment;
// Define the threadblock swizzle
using ThreadBlockSwizzle = cutlass::gemm::threadblock::GemvBatchedStridedThreadblockDefaultSwizzle;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
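
// ---------------------------------------------------------------------------
// Illustrative instantiation -- not part of the original CUTLASS source. The
// tile shapes, element types and layouts below are assumptions chosen to show
// how the template parameters line up; the nested types this exposes
// (ThreadBlockGemv, IteratorA/B, IteratorCD, FragmentCD, ThreadBlockSwizzle)
// are what the batched-strided GEMV kernels consume.
// ---------------------------------------------------------------------------
namespace example {

using BatchedGemvKernel = cutlass::gemm::kernel::DefaultGemv<
    cutlass::gemm::GemmShape<1, 64, 4>,   // ThreadBlockShape (M x N x K)
    cutlass::gemm::GemmShape<1, 4, 4>,    // ThreadShape
    float, cutlass::layout::RowMajor,     // ElementA, LayoutA (the 1 x K vector operand)
    float, cutlass::layout::ColumnMajor,  // ElementB, LayoutB (the K x N matrix operand)
    float, cutlass::layout::RowMajor>;    // ElementCD, LayoutCD (accumulator defaults to ElementCD)

} // namespace example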
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_universal_streamk.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Universal GEMM kernel using a stream-K decomposition of the threadblock mapping.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/barrier.h"
#include "cutlass/block_striped.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock mapping function
>
struct GemmUniversalStreamk {
public:
//
// Types and constants
//
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
/// The per-thread tile of raw accumulators
using AccumulatorTile = typename Mma::FragmentC;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Workspace bytes per thread block
static size_t const kWorkspaceBytesPerBlock =
__NV_STD_MAX(
kThreadCount * sizeof(AccumulatorTile),
Epilogue::kWorkspaceBytesPerBlock);
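  // For example (illustrative numbers only): with 8 warps per threadblock
  // (kThreadCount = 256) and a per-thread AccumulatorTile of 128 float elements
  // (512 B), the accumulator term is 256 * 512 B = 128 KiB, so each stream-K block
  // reserves at least that much partials workspace unless the epilogue's own
  // requirement is larger.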
/// Block-striped reduction utility
using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
typename LayoutA::Stride stride_a;
typename LayoutB::Stride stride_b;
typename LayoutC::Stride stride_c;
typename LayoutC::Stride stride_d;
typename LayoutA::Stride::LongIndex lda;
typename LayoutB::Stride::LongIndex ldb;
typename LayoutC::Stride::LongIndex ldc;
typename LayoutC::Stride::LongIndex ldd;
    int sm_limit;          /// Carve-out override: when the above are defaulted, the number of SMs that dispatch heuristics will attempt to load-balance across
//
// Methods
//
/// Default Constructor
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr),
sm_limit(-1)
{}
/// Constructor
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride stride_a,
typename LayoutB::Stride stride_b,
typename LayoutC::Stride stride_c,
typename LayoutC::Stride stride_d,
      int sm_limit = -1    /// Carve-out override: when the above are defaulted, the number of SMs that dispatch heuristics will attempt to load-balance across
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), sm_limit(sm_limit)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Constructor
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
      int sm_limit = -1    /// Carve-out override: when the above are defaulted, the number of SMs that dispatch heuristics will attempt to load-balance across
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), sm_limit(sm_limit)
{
stride_a = make_Coord(lda);
stride_b = make_Coord(ldb);
stride_c = make_Coord(ldc);
stride_d = make_Coord(ldd);
CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const
{
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.stride_a, args.stride_b);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
/// Parameters structure
struct Params
{
public:
//
// Data members
//
ThreadblockSwizzle block_mapping;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
void *barrier_workspace;
void *partials_workspace;
protected:
//
// Host-only dispatch-utilities
//
/// Pad the given allocation size up to the nearest cache line
static size_t cacheline_align_up(size_t size)
{
static const int CACHELINE_SIZE = 128;
return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
}
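    // e.g. cacheline_align_up(300) == 384: each region is rounded up to a multiple of
    // 128 B so consecutive allocations start on separate cache lines (assuming a
    // cache-line-aligned workspace base, which cudaMalloc provides).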
/// Get the workspace size needed for barrier
size_t get_barrier_workspace_size() const
{
// For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
// each reduction block needs its own synchronization flag.
int sk_blocks = block_mapping.sk_regions * block_mapping.sk_blocks_per_region;
int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
}
/// Get the workspace size needed for intermediate partial sums
size_t get_partials_workspace_size() const
{
int sk_blocks = block_mapping.sk_regions * block_mapping.sk_blocks_per_region;
return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
}
public:
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
output_op(args.epilogue),
mode(args.mode),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
barrier_workspace(nullptr),
partials_workspace(nullptr)
{
// Number of SMs to make available for StreamK decomposition
int avail_sms = (args.sm_limit == -1) ?
device_sms :
fast_min(args.sm_limit, device_sms);
// Initialize the block mapping structure
block_mapping = ThreadblockSwizzle(
typename ThreadblockSwizzle::template KernelTraits<GemmUniversalStreamk>(),
args.mode,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count,
sm_occupancy,
avail_sms);
}
/// Returns the workspace size (in bytes) needed for these parameters
size_t get_workspace_size() const
{
return
get_barrier_workspace_size() +
get_partials_workspace_size();
}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
uint8_t *ptr = static_cast<uint8_t*>(workspace);
// Establish partials workspace
partials_workspace = nullptr;
size_t partials_workspace_bytes = get_partials_workspace_size();
if (partials_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
partials_workspace = ptr;
ptr += partials_workspace_bytes;
}
// Establish barrier workspace
barrier_workspace = nullptr;
size_t barrier_workspace_bytes = get_barrier_workspace_size();
if (barrier_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
barrier_workspace = ptr;
ptr += barrier_workspace_bytes;
}
// Zero-initialize barrier workspace
if (barrier_workspace)
{
size_t barrier_workspace_bytes = get_barrier_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
cudaError_t result = cudaMemsetAsync(
barrier_workspace,
0,
barrier_workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Returns the GEMM volume in thread block tiles
cutlass::gemm::GemmCoord get_tiled_shape() const
{
return block_mapping.tiled_shape;
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return block_mapping.get_grid_dims();
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()");
// Update input/output pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
}
};
/// Tile work descriptor
struct TileWorkDesc
{
/// The linear tile index
int tile_idx;
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
cutlass::gemm::GemmCoord tiled_coord;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
int iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_begin;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_end;
/// The number of remaining MAC-iterations this threadblock will perform for this tile
int k_iters_remaining;
// Whether this block will perform the first iteration of this tile
CUTLASS_DEVICE
bool tile_started()
{
return (k_begin == 0);
}
// Whether this block will perform the last iteration of this tile
CUTLASS_DEVICE
bool tile_finished(Params const ¶ms)
{
return (k_end == params.block_mapping.problem_size.k());
}
};
/// Shared memory storage structure
union SharedStorage
{
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
protected:
//
// Data members
//
/// GEMM problem parameters
Params const ¶ms;
/// Shared storage reference
SharedStorage &shared_storage;
/// ID within the threadblock
int thread_idx;
/// ID of warp
int warp_idx;
/// ID of each thread within a warp
int lane_idx;
/// Block index
int block_idx;
/// Threadblock scoped epilogue
Epilogue epilogue;
public:
//
// Host-only dispatch API
//
/// Determines whether the GEMM problem size satisfies this kernel's
/// alignment requirements
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size)
{
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
/// Determines whether the GEMM problem satisfies this kernel's
/// alignment requirements
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
protected:
//
// Device-only utility methods
//
/// Iterator for fetching tile fragments from A
CUTLASS_DEVICE
typename Mma::IteratorA init_iterator_A(TileWorkDesc &tile_work)
{
// The input A matrix
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
// Update input pointers based on batched/array mode
if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
}
int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
int m_end = params.block_mapping.problem_size.m();
return Mma::IteratorA(
params.params_A,
ptr_A,
{ m_end, tile_work.k_end },
threadIdx.x,
{ m_begin, tile_work.k_begin });
}
/// Iterator for fetching tile fragments from B
CUTLASS_DEVICE
typename Mma::IteratorB init_iterator_B(TileWorkDesc &tile_work)
{
// The input B matrix
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
// Update input pointers based on batched/array mode
if (params.mode == GemmUniversalMode::kBatched) {
ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
}
int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
int n_end = params.block_mapping.problem_size.n();
return Mma::IteratorB(
params.params_B,
ptr_B,
{ tile_work.k_end, n_end },
threadIdx.x,
{ tile_work.k_begin, n_begin });
}
CUTLASS_DEVICE
void init_dp_tile_work(
TileWorkDesc &tile_work,
int tile_idx)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile;
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = params.block_mapping.iters_per_tile;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = 0;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = params.block_mapping.problem_size.k();
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
CUTLASS_DEVICE
void init_sk_tile_work(
TileWorkDesc &tile_work,
int tile_idx,
int block_iter_begin,
int block_iter_end)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration for this tile
int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);
// The first tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_begin = tile_work.iter_begin - tile_iter_begin;
// The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_end = block_iter_end - tile_iter_begin;
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = k_iter_end - k_iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = k_iter_begin * Mma::Shape::kK;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = min(
params.block_mapping.problem_size.k(), // extent of k domain
(k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
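  // Worked example for init_sk_tile_work (illustrative): with Mma::Shape::kK = 32 and
  // problem k = 2048, iters_per_tile = 64, so output tile t owns global MAC-iterations
  // [64*t, 64*t + 64). An SK block assigned iterations [96, 160) starts at tile 2 (the
  // tile containing iteration 159) with k_begin = 0, k_end = 1024 and 32 iterations; it
  // does not finish that tile, so it shares its partials. It then steps back to tile 1
  // with k_begin = 1024, k_end = 2048 and its remaining 32 iterations; because k_end
  // reaches the problem extent, it is the "finishing" block for tile 1 and runs the
  // epilogue after folding in its peers' partials.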
/// Share accumulators with peers
CUTLASS_DEVICE
void share_accumulators(AccumulatorTile const &accumulator_tile, int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
int accum_tile_offset = first_block_idx * kThreadCount;
if (block_idx == first_block_idx)
{
// First peer initializes the workspace partials
BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
else
{
// Subsequent peers atomically accumulate into the workspace partials
if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
{
// Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
}
else
{
// Turnstile reduction order: wait until the previous peer has written
int wait_count = block_idx - first_block_idx;
Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
}
// Perform reduction in workspace
BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
// Signal our arrival
Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
}
/// Acquire accumulators from peers
CUTLASS_DEVICE
void acquire_accumulators(
AccumulatorTile &accumulator_tile,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
// Wait for arrival
int num_carry_in = block_idx - first_block_idx;
Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);
// Load and add peer-partials accumulator tile to local accumulator tile
int accum_tile_offset = first_block_idx * kThreadCount;
BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
}
/// Perform epilogue computations and output
CUTLASS_DEVICE
void do_epilogue(
TileWorkDesc &tile_work,
AccumulatorTile &accumulator_tile)
{
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
// Update pointers for batched/array mode(s)
if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C;
ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tile_work.tiled_coord.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()];
}
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tile_work.tiled_coord.m() * Mma::Shape::kM,
tile_work.tiled_coord.n() * Mma::Shape::kN
);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Execute the epilogue operator to update the destination tensor.
epilogue(
EpilogueOutputOp(params.output_op),
iterator_D,
accumulator_tile,
iterator_C);
}
CUTLASS_DEVICE
void separate_reduction(int reduce_idx)
{
int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;
// Reduce by sk-tile (every tile contributed to by one or more blocks)
reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;
int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile;
int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile - 1;
peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);
// Wait for peers to complete
int peer_idx_end = peer_idx_last + 1;
int num_peers = peer_idx_end - peer_idx_begin;
Barrier::wait_eq_reset(
params.barrier_workspace,
thread_idx,
(reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
num_peers);
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tiled_coord.m() * Mma::Shape::kM,
tiled_coord.n() * Mma::Shape::kN
);
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
// Update pointers for batched/array mode(s)
if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += tiled_coord.k() * params.batch_stride_C;
ptr_D += tiled_coord.k() * params.batch_stride_D;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tiled_coord.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tiled_coord.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Execute the epilogue operator to update the destination tensor.
epilogue.reduce(
peer_idx_begin,
peer_idx_end,
reduce_fragment_idx,
params.partials_workspace,
EpilogueOutputOp(params.output_op),
iterator_D,
iterator_C);
}
CUTLASS_DEVICE
void process_tile(
TileWorkDesc tile_work,
int dp_start_block_idx,
int block_iter_begin)
{
// Initialize input iterators
typename Mma::IteratorA iterator_A = init_iterator_A(tile_work);
typename Mma::IteratorB iterator_B = init_iterator_B(tile_work);
// Initialize accumulators
AccumulatorTile accumulator_tile;
accumulator_tile.clear();
// Perform this tile's range of multiply-accumulate (MAC) iterations
Mma mma(
shared_storage.main_loop,
thread_idx,
warp_idx,
lane_idx);
mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);
if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
(params.block_mapping.reduction_blocks == 0) ||
(block_idx >= dp_start_block_idx))
{
//
// Cooperative SK peer reduction or DP block
//
int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);
if (!tile_work.tile_finished(params)) {
// Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
share_accumulators(accumulator_tile, first_block_idx);
}
else
{
// DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
if (!tile_work.tile_started())
{
// A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
acquire_accumulators(accumulator_tile, first_block_idx);
}
do_epilogue(tile_work, accumulator_tile);
}
}
else
{
//
// Separate peer reduction
//
// Share accumulator partial sums with peer threadblock(s) through scratch workspace
epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());
// Signal arrival
Barrier::arrive_range_inc(
params.barrier_workspace,
thread_idx,
tile_work.tile_idx * Epilogue::kAccumulatorFragments,
Epilogue::kAccumulatorFragments);
}
}
/// Executes one GEMM
CUTLASS_DEVICE
void gemm()
{
// Initialize block's iteration range
int tile_idx, block_iter_begin, block_iters_remaining;
int sk_padding_start_block_idx = params.block_mapping.sk_regions * params.block_mapping.sk_blocks_per_region;
int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;
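    // Example layout (illustrative): with one SK region of 3 SK blocks, one SK wave over
    // avail_sms = 4, dp_blocks = 8 and reduction_blocks = 2 under the kMixed strategy,
    // the boundaries are 3 / 4 / 12 / 14: blocks 0..2 do stream-K work, block 3 pads the
    // SK wave, blocks 4..11 are data-parallel, blocks 12..13 perform separate reduction,
    // and any remaining blocks are grid-padding fillers that exit immediately.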
if (block_idx < sk_padding_start_block_idx)
{
// This is a SK block
int block_iter_end;
params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
block_iters_remaining = block_iter_end - block_iter_begin;
tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
}
else if (block_idx < dp_start_block_idx)
{
// This is a filler block
return;
}
else if (block_idx < reduce_start_block_idx)
{
// This is a DP block
int dp_block_idx = block_idx - dp_start_block_idx;
int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;
// Blocks in the first DP wave get the configured number of tiles
tile_idx = first_dp_tile + dp_block_idx;
int tile_allotment = params.block_mapping.dp_first_wave_tiles;
// Blocks in subsequent DP waves get one tile each and skip past the extra tiles consumed by the first wave
if (dp_block_idx >= params.block_mapping.avail_sms) {
tile_allotment = 1;
tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
}
block_iter_begin = 0;
block_iters_remaining = params.block_mapping.iters_per_tile * tile_allotment;
}
else if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed) &&
(block_idx < grid_padding_start_block_idx))
{
// This is a reduction threadblock
int reduce_block_idx = block_idx - reduce_start_block_idx;
separate_reduction(reduce_block_idx);
return;
}
else
{
// This is a filler block
return;
}
// Iteration-processing loop body
CUTLASS_PRAGMA_NO_UNROLL
while (true)
{
// Initialize tile work descriptor
TileWorkDesc tile_work;
if (block_idx >= dp_start_block_idx)
{
init_dp_tile_work(tile_work, tile_idx);
// DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
if ((tile_idx < params.block_mapping.sk_tiles) ||
(tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape.m()) ||
(tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape.n()))
{
break;
}
}
else
{
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
// Perform this block's share of work for this tile
process_tile(tile_work, dp_start_block_idx, block_iter_begin);
// Update remaining work for this block
block_iters_remaining -= tile_work.k_iters_remaining;
if (block_iters_remaining == 0) {
// Done
break;
}
// Continue to next tile
__syncthreads();
if (block_idx >= dp_start_block_idx)
{
// DP blocks consume their tiles at a stride of avail_sms
tile_idx += params.block_mapping.avail_sms;
}
else
{
// SK blocks consume their tiles in backwards order
tile_idx--;
}
}
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmUniversalStreamk op(params, shared_storage);
op();
}
// Constructor
CUTLASS_DEVICE
GemmUniversalStreamk(
Params const ¶ms,
SharedStorage &shared_storage)
:
params(params),
shared_storage(shared_storage),
thread_idx(threadIdx.x),
warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code is compiled as warp-uniform
lane_idx(threadIdx.x % 32),
block_idx(params.block_mapping.get_block_idx()),
epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx)
{}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()()
{
// Do the GEMM
gemm();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 38,687 | C | 32.151671 | 166 | 0.638432 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_pipelined.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not support batched GEMM or split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Mma, typename Epilogue, typename ThreadblockSwizzle>
__global__ void GemmPipelined(
cutlass::gemm::GemmCoord problem_size,
cutlass::gemm::GemmCoord grid_tiled_shape,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::Params params_epilogue
) {
// Shared storage needed by threadblock-scoped matrix multiply-accumulate
__shared__ union {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
} shared_storage;
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
int swizzle_log_tile = ThreadblockSwizzle().get_log_tile(grid_tiled_shape);
cutlass::gemm::GemmCoord tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile);
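// Early exit if CTA is out of range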
if (grid_tiled_shape.m() <= tb_tile_offset.m() ||
grid_tiled_shape.n() <= tb_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k()
};
cutlass::MatrixCoord tb_offset_B{
tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int tb_thread_id = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params_A,
ref_A.data(),
{problem_size.m(), problem_size.k()},
tb_thread_id,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params_B,
ref_B.data(),
{problem_size.k(), problem_size.n()},
tb_thread_id,
tb_offset_B);
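// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.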
int warp_id = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_id = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, tb_thread_id, warp_id, lane_id);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(problem_size, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
Epilogue epilogue(
params_epilogue,
shared_storage.epilogue,
tb_thread_id,
warp_id,
lane_id);
tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.n() * Mma::Shape::kN
);
// run efficient epilogue
epilogue({problem_size.m(), problem_size.n()}, accumulators, threadblock_offset);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 5,165 | C | 31.490566 | 100 | 0.650726 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_grouped_problem_visitor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Scheduler for grouped GEMM
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Helper for correctly representing problem sizes in grouped kernels
template <
typename ThreadblockShape,
bool Transposed
>
struct GemmGroupedProblemSizeHelper {
static bool const kTransposed = Transposed;
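/// Returns the grid of threadblock tiles covering the problem (a ceiling division of the
/// problem extents by the threadblock tile shape)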
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return cutlass::gemm::GemmCoord(
((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM),
((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN),
1);
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {
if (kTransposed) {
swap(problem.m(), problem.n());
}
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
return grid.m() * grid.n();
}
};
} // namespace detail
/// Visitor class to abstract away the algorithm for iterating over tiles
template <typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount,
bool Transposed = false>
struct GemmGroupedProblemVisitor : public GroupedProblemVisitor<
detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transposed>,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount> {
static bool const kTransposed = Transposed;
using ProblemSizeHelper = detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transposed>;
using Base = GroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount>;
using Params = typename Base::Params;
using SharedStorage = typename Base::SharedStorage;
//
// Methods
//
CUTLASS_DEVICE
GemmGroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base (params_, shared_storage_, block_idx)
{}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4,691 | C | 37.146341 | 126 | 0.617566 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/symm_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped triangular matrix multiply-accumulate (A*B or B*A)
typename Mma2_, ///! Threadblock-scoped triangular matrix multiply-accumulate (AT*B or B*AT)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight)
FillMode FillMode_ ///! Fill Mode for triangular matrix (kLower or kUpper)
>
struct SymmUniversal {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma1::IteratorA::Element;
using ElementB = typename Mma1::IteratorB::Element;
// Mma1 (TRMM - with diagonal: C_tmp = alpha * A * B)
using LayoutA = typename Mma1::IteratorA::Layout;
using LayoutBT = typename Mma1::IteratorB::Layout;
static ComplexTransform const kMma1TransformA = Mma1::kTransformA;
static ComplexTransform const kMma1TransformB = Mma1::kTransformB;
// Mma2 (TRMM - withOUT diagonal: alpha * AT * B)
using LayoutB = typename Mma2::IteratorA::Layout;
using LayoutAT = typename Mma2::IteratorB::Layout;
static ComplexTransform const kMma2TransformA = Mma2::kTransformA;
static ComplexTransform const kMma2TransformB = Mma2::kTransformB;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
// Output-related type definitions
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static SideMode const kSideModeA = SideMode_;
static FillMode const kFillModeA = FillMode_;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr) { }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) {
}
/// Returns arguments for the transposed problem sizes
Arguments transposed_problem_size() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
return args;
}
/// Returns arguments for the transposed matrices
Arguments swapped_matrices() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
// Mma1 Iterator A and B params
typename Mma1::IteratorA::Params params_A_mma1;
typename Mma1::IteratorB::Params params_B_mma1;
// Mma2 Iterator A and B params
typename Mma2::IteratorA::Params params_A_mma2;
typename Mma2::IteratorB::Params params_B_mma2;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A_mma1(0),
params_B_mma1(0),
params_A_mma2(0),
params_B_mma2(0),
params_C(0),
params_D(0),
mode(cutlass::gemm::GemmUniversalMode::kGemm),
batch_count(0),
gemm_k_size(0),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
batch_stride_A(0),
batch_stride_B(0),
batch_stride_C(0),
batch_stride_D(0),
semaphore(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A_mma1(args.lda),
params_B_mma1(args.ldb),
params_A_mma2(args.lda),
params_B_mma2(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
SymmUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes two GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
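// Determine the K-range [offset_k, problem_size_k) processed by this CTA
// (sliced along K for kGemm and kGemmSplitKParallel modes)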
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK_mma1{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN_mma1{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
cutlass::MatrixCoord tb_offset_MxK_mma2{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN_mma2{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1
Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2
Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
int gemm_k_iterations_mma1 = gemm_k_iterations;
int gemm_k_iterations_mma2 = gemm_k_iterations;
/******************************************************************************************************
* SYMM (Side Mode, Fill Mode) is made of two TRMMs:
First TRMM (Mma1: Side Mode, Fill Mode, Non-Unit Diag): (A * B) or (B * A)
Second TRMM (Mma2: Side Mode, Inverted Fill Mode, Unit Diag): (AT * B) or (B * AT)
* For the first TRMM (Mma1) of SYMM, the following method is used to calculate the k-iterations:
First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other
- (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations
needed to process all elements till that coordinate.
- (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations
needed to process all elements till that coordinate.
Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other
- (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations
that can be skipped for all elements of this tile.
- (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations
that can be skipped for all elements of this tile.
* For the second TRMM (Mma2) of SYMM, the k-iterations and threadblock offsets are calculated
the same way as the first TRMM (Mma1) of same side mode but with inverted fill mode.
For example, if the first TRMM is left sided with lower fill, the second TRMM would be
left sided with upper fill.
********************************************************************************************************/
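// Illustrative example (tile extents assumed for this comment only, not taken from a
// particular configuration): with Mma1::Shape::kM == Mma1::Shape::kK == 32 and a CTA at
// tile row m() == 2 in (Left Side, Lower Fill) mode, Mma1 is capped at
// ((2 + 1) * 32 + 31) / 32 = 3 k-iterations (stopping at the bottom of the CTA tile),
// while Mma2 advances its starting k-offset by (2 * 32) / 32 = 2 iterations and reduces
// its iteration count by the same amount.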
if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kLower) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 != 0) {
tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kUpper) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 != 0) {
tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kUpper) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 != 0) {
tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kLower) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 != 0) {
tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
}
}
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A_mma1(
params.params_A_mma1,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK_mma1);
typename Mma1::IteratorB iterator_B_mma1(
params.params_B_mma1,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN_mma1);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_A_mma2(
params.params_A_mma2,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK_mma2);
typename Mma2::IteratorB iterator_B_mma2(
params.params_B_mma2,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN_mma2);
// Compute threadblock-scoped matrix multiply-add (A x B) or (B x A)
mma1(
gemm_k_iterations_mma1,
accumulators,
iterator_A_mma1,
iterator_B_mma1,
accumulators);
// Compute threadblock-scoped matrix multiply-add (AT x B) or (B x AT)
mma2(
gemm_k_iterations_mma2,
accumulators,
iterator_A_mma2,
iterator_B_mma2,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 23,900 | C | 33.193133 | 138 | 0.628828 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_grouped_softmax_mainloop_fusion.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level softmax-grouped-GEMM
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/threadblock/default_mma_softmax_mainloop_fusion.h"
#include "cutlass/layout/permute.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for Scale/Bias vectors
typename ElementScaleBias_,
/// Layout type for Scale/Bias vectors
typename LayoutScaleBias_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
/// Operation performed by GEMM
typename Operator = typename device::DefaultGemmConfiguration<
OperatorClass, ArchTag, ElementA_, ElementB_, ElementC_,
ElementAccumulator>::Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone
>
struct DefaultGemmGroupedSoftmaxMainloopFusion {
// If true, we must construct a 'transposed-and-exchanged' Mma operator.
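// That is, compute the transposed problem with the A and B operands exchanged, since the
// threadblock-scoped Mma and epilogue constructed below assume a row-major output layout.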
static bool const kInternalTranspose = platform::is_same<LayoutC_, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::MapArguments<
ElementA_,
LayoutA_,
ComplexTransform::kNone,
kAlignmentA,
ElementB_,
LayoutB_,
ComplexTransform::kNone,
kAlignmentB,
LayoutC_,
kInternalTranspose
>;
private:
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMmaSoftmaxMainloopFusion<
typename MapArguments::ElementA, typename MapArguments::LayoutA, MapArguments::kAlignmentA,
typename MapArguments::ElementB, typename MapArguments::LayoutB, MapArguments::kAlignmentB,
ElementScaleBias_, LayoutScaleBias_, ElementAccumulator, layout::RowMajor, OperatorClass, ArchTag,
ThreadblockShape, WarpShape, InstructionShape, Stages, kInternalTranspose,
Operator, false, SharedMemoryClear>::ThreadblockMma;
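/// Number of warp-level partitions along the K dimension of the threadblock tile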
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
public:
using GemmKernel = kernel::GemmGroupedSoftmaxMainloopFusion<
Mma,
Epilogue,
ThreadblockSwizzle,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,592 | C | 38.957576 | 104 | 0.677336 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
struct GemmParams {
//
// Type definitions
//
using Index = int32_t;
using LongIndex = int64_t;
using MmaIteratorParams = typename cutlass::transform::threadblock::PredicatedTileAccessIteratorParams;
using EpilogueIteratorParams = typename cutlass::epilogue::threadblock::PredicatedTileIteratorParams;
//
// Data members
//
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
// Data members for Mma::Iterator::Params
MmaIteratorParams params_itr_a;
MmaIteratorParams params_itr_b;
// Data member for Epilogue::OutputTileIterator::Params
EpilogueIteratorParams params_itr_c;
EpilogueIteratorParams params_itr_d;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
LongIndex lda;
LongIndex ldb;
LongIndex ldc;
LongIndex ldd;
LongIndex batch_stride_A;
LongIndex batch_stride_B;
LongIndex batch_stride_C;
LongIndex batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
GemmParams() {}
CUTLASS_HOST_DEVICE
GemmParams(
cutlass::gemm::GemmCoord problem_size_,
cutlass::gemm::GemmCoord grid_tiled_shape_,
int swizzle_log_tile_,
GemmUniversalMode mode_,
int batch_count_,
int gemm_k_size_,
void const * ptr_A_,
void const * ptr_B_,
void const * ptr_C_,
void * ptr_D_,
LongIndex lda_,
LongIndex ldb_,
LongIndex ldc_,
LongIndex ldd_,
int64_t batch_stride_A_,
int64_t batch_stride_B_,
int64_t batch_stride_C_,
int64_t batch_stride_D_,
MmaIteratorParams const & params_itr_a_,
MmaIteratorParams const & params_itr_b_,
EpilogueIteratorParams const & params_itr_c_,
EpilogueIteratorParams const & params_itr_d_,
void *workspace_ = nullptr) :
problem_size(problem_size_),
grid_tiled_shape(grid_tiled_shape_),
swizzle_log_tile(swizzle_log_tile_),
mode(mode_),
batch_count(batch_count_),
gemm_k_size(gemm_k_size_),
ptr_A(const_cast<void *>(ptr_A_)),
ptr_B(const_cast<void *>(ptr_B_)),
ptr_C(const_cast<void *>(ptr_C_)),
ptr_D(ptr_D_),
lda(lda_),
ldb(ldb_),
ldc(ldc_),
ldd(ldd_),
batch_stride_A(batch_stride_A_),
batch_stride_B(batch_stride_B_),
batch_stride_C(batch_stride_C_),
batch_stride_D(batch_stride_D_),
params_itr_a(params_itr_a_),
params_itr_b(params_itr_b_),
params_itr_c(params_itr_c_),
params_itr_d(params_itr_d_),
semaphore(static_cast<int *>(workspace_)
) { }
CUTLASS_HOST_DEVICE
void update(
void const * ptr_A_,
void const * ptr_B_,
void const * ptr_C_,
void * ptr_D_,
int64_t batch_stride_A_,
int64_t batch_stride_B_,
int64_t batch_stride_C_,
int64_t batch_stride_D_,
void *workspace_ = nullptr) {
ptr_A = const_cast<void *>(ptr_A_);
ptr_B = const_cast<void *>(ptr_B_);
ptr_C = const_cast<void *>(ptr_C_);
ptr_D = ptr_D_;
batch_stride_A = batch_stride_A_;
batch_stride_B = batch_stride_B_;
batch_stride_C = batch_stride_C_;
batch_stride_D = batch_stride_D_;
semaphore = static_cast<int *>(workspace_);
CUTLASS_TRACE_HOST("GemmParams::update()");
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,144 | C | 29.725 | 107 | 0.620117 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Grouped Rank2K kernel.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h"
#include "cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T)
typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
ComplexTransform OriginalTransformA_, ///! Public-facing transformation on A
ComplexTransform OriginalTransformB_, ///! Public-facing transformation on B
FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper)
BlasMode BlasMode_, ///! Blas3 computation mode
GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
bool Transposed = false
>
struct Rank2KGrouped {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
static_assert(platform::is_same<typename Mma1::LayoutC, cutlass::layout::RowMajor>::value &&
platform::is_same<typename Mma2::LayoutC, cutlass::layout::RowMajor>::value,
"Kernel-level grouped Rank2K requires that LayoutC be row major.");
// Define a generic Mma for use cases that rely on Kernel::Mma
using Mma = Mma1_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static bool const kTransposed = Transposed;
// Public-facing type definitions related to operand element type, layout, and complex conjugate
// operation. Must interact with the 'kTransposed' notion to reflect the original layout,
// fill mode, etc. passed in.
//
// Recall that a Rank2K operation performs (A x BT) + (B x AT)
// This is performed via:
// Mma1 = (A x BT)
// Mma2 = (B x AT)
//
// However, if C needs to be transposed, then this is changed to the following:
// Mma1 = (B x AT)
// Mma2 = (A x BT)
//
// The transformation above is achieved by swapping the Layouts/Elements/Transforms/etc.
// of A and B as they are passed into the instantiations of Mma1 and Mma2.
//
// Now, given access to only Mma1 and Mma2, as well as whether a transposition has occurred,
// we wish to retrieve the original Layouts/Elements/etc. for A and B that were passed into
// the device-level call.
//
// The logic to do this (which is made clearer by referencing the above instantiations) is as follows:
// LayoutA = kTransposed ? Mma2::LayoutA : Mma1::LayoutA
// LayoutB = kTransposed ? Mma1::LayoutA : Mma2::LayoutA
//
// We achieve this swapping by passing Mma1::*A and Mma2::*B to Rank2KMapArguments:
using MapArgumentsA = kernel::detail::Rank2KMapArguments<
typename Mma1::IteratorA::Element,
typename Mma1::IteratorA::Layout,
Mma1::kTransformA,
Mma1::IteratorA::AccessType::kElements,
typename Mma2::IteratorA::Element,
typename Mma2::IteratorA::Layout,
Mma2::kTransformA,
Mma2::IteratorA::AccessType::kElements,
typename Mma1::LayoutC,
FillModeC_,
kTransposed
>;
using ElementA = typename MapArgumentsA::ElementA;
using LayoutA = typename MapArgumentsA::LayoutA;
static int const kAlignmentA = MapArgumentsA::kAlignmentA;
using MapArgumentsB = kernel::detail::Rank2KMapArguments<
typename Mma2::IteratorA::Element,
typename Mma2::IteratorA::Layout,
Mma2::kTransformA,
Mma2::IteratorA::AccessType::kElements,
typename Mma1::IteratorA::Element,
typename Mma1::IteratorA::Layout,
Mma1::kTransformA,
Mma1::IteratorA::AccessType::kElements,
typename Mma2::LayoutC,
FillModeC_,
kTransposed
>;
using ElementB = typename MapArgumentsB::ElementA;
using LayoutB = typename MapArgumentsB::LayoutA;
static int const kAlignmentB = MapArgumentsB::kAlignmentA;
// Use the user-provided TransformA and TransformB, rather than those
// resulting from MapArguments, because Mma1 and Mma2 may have different
// complex transforms than those passed in by the user.
// (See kernel/rank_2k_complex.h for an example of this)
static cutlass::ComplexTransform const kTransformA = OriginalTransformA_;
static cutlass::ComplexTransform const kTransformB = OriginalTransformB_;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename MapArgumentsA::LayoutC;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static FillMode const kFillModeC = MapArgumentsA::kFillModeC;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static BlasMode const kBlasMode = BlasMode_;
private:
static FillMode const kInternalFillModeC = FillModeC_;
public:
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ProblemVisitor = Rank2KGroupedProblemVisitor<
ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount,
kInternalFillModeC>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord *problem_sizes;
int problem_count;
int threadblock_count;
typename EpilogueOutputOp::Params epilogue;
ElementA ** ptr_A;
ElementB ** ptr_B;
ElementC ** ptr_C;
ElementC ** ptr_D;
typename LayoutA::Stride::LongIndex *lda;
typename LayoutB::Stride::LongIndex *ldb;
typename LayoutC::Stride::LongIndex *ldc;
typename LayoutC::Stride::LongIndex *ldd;
// Only used by device-level operator
GemmCoord *host_problem_sizes;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments():
mode(GemmUniversalMode::kGemm),
problem_count(0),
threadblock_count(0),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
lda(nullptr),
ldb(nullptr),
ldc(nullptr),
ldd(nullptr),
host_problem_sizes(nullptr)
{
}
/// Ctor
CUTLASS_HOST_DEVICE
Arguments(
GemmUniversalMode mode,
GemmCoord *problem_sizes,
int problem_count,
int threadblock_count,
typename EpilogueOutputOp::Params epilogue,
ElementA ** ptr_A,
ElementB ** ptr_B,
ElementC ** ptr_C,
ElementC ** ptr_D,
typename LayoutA::Stride::LongIndex *lda,
typename LayoutB::Stride::LongIndex *ldb,
typename LayoutC::Stride::LongIndex *ldc,
typename LayoutC::Stride::LongIndex *ldd,
GemmCoord *host_problem_sizes=nullptr
):
mode(mode),
problem_sizes(problem_sizes),
problem_count(problem_count),
threadblock_count(threadblock_count),
epilogue(epilogue),
ptr_A(ptr_A),
ptr_B(ptr_B),
ptr_C(ptr_C),
ptr_D(ptr_D),
lda(lda),
ldb(ldb),
ldc(ldc),
ldd(ldd),
host_problem_sizes(host_problem_sizes)
{
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
typename ProblemVisitor::Params problem_visitor;
int threadblock_count;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
ElementA ** ptr_A;
ElementB ** ptr_B;
ElementC ** ptr_C;
ElementC ** ptr_D;
typename LayoutA::Stride::LongIndex *lda;
typename LayoutB::Stride::LongIndex *ldb;
typename LayoutC::Stride::LongIndex *ldc;
typename LayoutC::Stride::LongIndex *ldd;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
mode(cutlass::gemm::GemmUniversalMode::kGemm),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
lda(nullptr),
ldb(nullptr),
ldc(nullptr),
ldd(nullptr)
{ }
CUTLASS_HOST_DEVICE
Params(Arguments const &args, void *workspace = nullptr, int tile_count = 0):
problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
output_op(args.epilogue),
mode(args.mode),
ptr_A(args.ptr_A),
ptr_B(args.ptr_B),
ptr_C(args.ptr_C),
ptr_D(args.ptr_D),
lda(args.lda),
ldb(args.ldb),
ldc(args.ldc),
ldd(args.ldd)
{
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr,
int tile_count = 0) {
problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, workspace, tile_count);
threadblock_count = args.threadblock_count;
output_op = args.epilogue;
ptr_A = args.ptr_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
}
};
/// Shared memory storage structure
struct SharedStorage {
union {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
} kernel;
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
public:
//
// Methods
//
CUTLASS_DEVICE
Rank2KGrouped() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
//
// Problem visitor.
//
ProblemVisitor problem_visitor(
params.problem_visitor,
shared_storage.problem_visitor,
blockIdx.x);
// Outer 'persistent' loop to iterate over tiles
while (problem_visitor.next_tile()) {
GemmCoord problem_size = problem_visitor.problem_size();
int32_t problem_idx = problem_visitor.problem_index();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
cutlass::gemm::GemmCoord threadblock_tile_offset = problem_visitor.threadblock_offset(threadblock_idx);
//
// Perform checks to determine whether the results of this threadblock will be needed.
// An example of an unneeded threadblock is one that is assigned to compute in the upper
// portion of a Rank2K kernel filled with mode kLower.
//
// TODO: Consider pushing these checks into ProblemVisitor to avoid spuriously
// returning from `next_tile()`.
//
// Early exit if threadblock is out of range
if (grid_shape.m() <= threadblock_tile_offset.m() ||
grid_shape.n() <= threadblock_tile_offset.n()) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
// Skip this tile if Fill Mode is Lower and
// if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal)
if (kInternalFillModeC == cutlass::FillMode::kLower &&
(threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
// Skip this tile if Fill Mode is Upper and
// if the entire tile is below the main diagonal (top-right corner is at or below the diagonal)
if (kInternalFillModeC == cutlass::FillMode::kUpper &&
threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
bool tile_on_diagonal = false;
// Mark tiles that are being crossed by the main diagonal
// (top-right and bottom-left corners are on either side of the diagonal)
if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN
&& threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
tile_on_diagonal = true;
}
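      // Illustrative example (the tile shape and coordinates are hypothetical): with
      // Mma1::Shape::kM == kN == 128 and kInternalFillModeC == FillMode::kLower, the threadblock at
      // (m, n) == (0, 1) covers rows [0, 128) and columns [128, 256), lies entirely above the
      // diagonal, and is skipped by the kLower check above; the threadblock at (1, 1) straddles the
      // diagonal and is marked tile_on_diagonal.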
int offset_k = 0;
int problem_size_k = problem_size.k();
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < grid_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * problem_size.k();
}
offset_k = threadblock_tile_offset.k() * problem_size.k();
}
ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx]));
typename LayoutA::Stride::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]);
ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx]));
typename LayoutB::Stride::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]);
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Assume identity swizzle
MatrixCoord tb_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A(
Mma1::IteratorA::Params(ldm_A),
ptr_A,
{problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma1::IteratorB iterator_BT(
Mma1::IteratorB::Params(ldm_B),
ptr_B,
{problem_size_k, problem_size.n()},
thread_idx,
tb_offset_KxN);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_B(
Mma2::IteratorA::Params(ldm_B),
ptr_B,
{problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma2::IteratorB iterator_AT(
Mma2::IteratorB::Params(ldm_A),
ptr_A,
{problem_size_k, problem_size.n()},
thread_idx,
tb_offset_KxN);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1 (A x BT)
Mma1 mma1(shared_storage.kernel.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2 (B x AT)
Mma2 mma2(shared_storage.kernel.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
// Wait for all threads to finish their epilogue phases from the previous tile.
__syncthreads();
// Compute threadblock-scoped matrix multiply-add (A x BT)
mma1(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_BT,
accumulators);
      // The HER2K kernel needs Alpha to be complex, and conj(Alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C[problem_idx]);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D[problem_idx]);
// If TB not on diagonal, FillMode doesn't apply.
FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]),
ptr_C,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]),
ptr_D,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
__syncthreads();
accumulators.clear();
}
// Compute threadblock-scoped matrix multiply-add (B x AT)
mma2(
gemm_k_iterations,
accumulators,
iterator_B,
iterator_AT,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
/* Needed for HER2K where the second HERK is multiplied by conj(alpha) */
typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1);
EpilogueOutputOp output_op_her2k(second_her2k_params);
//
// Masked tile iterators constructed from members
//
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C[problem_idx]);
      // The HER2K kernel needs Alpha to be complex, and conj(Alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
ptr_C = static_cast<ElementC *>(params.ptr_D[problem_idx]);
}
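      // In the Hermitian case the first epilogue above has already written alpha * (A x BT) + beta * C
      // to D, so the second epilogue reads D as its source tensor and applies conj(Alpha) with beta == 1.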
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D[problem_idx]);
// If TB not on diagonal, FillMode doesn't apply.
FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]),
ptr_C,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]),
ptr_D,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
if (kBlasMode == BlasMode::kSymmetric) {
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
} else {
epilogue(
output_op_her2k,
iterator_D,
accumulators,
iterator_C);
}
// Next tile
problem_visitor.advance(gridDim.x);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 22,962 | C | 31.571631 | 124 | 0.631086 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/conv2d_problem_size.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains definitions and utility functions for describing convolution problem sizes.
    Conv2dProblem description:
activation (NHWC),
filter (KRSC),
output (NPQK),
      padding (pad_h, pad_w),
stride (stride_h, stride_w),
dilation (dilation_h, dilation_w).
Free functions to map:
Map tensor extents (Conv2d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_extent(ConvolutionOperator)
Map tensor sizes (Conv2d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_size(ConvolutionOperator)
Map tensor problem sizes (Conv2d -> ImplicitGemm): implicit_gemm_problem_size(ConvolutionOperator)
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cmath>
#else
#include <cmath>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/functional.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Problem size structure
struct Conv2dProblemSize {
  // Parameters that strictly define the Conv2d problem size
int N, H, W, C, P, Q, K, R, S;
int pad_h, pad_w;
int stride_h, stride_w;
int dilation_h, dilation_w;
Mode mode;
// Conv2d implementation-related parameters
int split_k_slices;
int groups;
//
// Methods
//
public:
CUTLASS_HOST_DEVICE
Conv2dProblemSize():
N(0), H(0), W(0), C(0), P(0), Q(0), K(0), R(0), S(0),
pad_h(0), pad_w(0), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1),
mode(Mode::kConvolution), split_k_slices(1), groups(1) { }
/// Constructor for default padding, stride, dilation, and split-K
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
int N,
int H,
int W,
int C,
int P,
int Q,
int K,
int R,
int S,
Mode mode
):
N(N), H(H), W(W), C(C), P(P), Q(Q), K(K), R(R), S(S),
pad_h(R / 2), pad_w(S / 2), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1),
mode(mode), split_k_slices(1), groups (1) { }
/// Constructor
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
int N,
int H,
int W,
int C,
int K,
int R,
int S,
int P,
int Q,
int pad_h,
int pad_w,
int stride_h,
int stride_w,
int dilation_h,
int dilation_w,
Mode mode,
int split_k_slices = 1,
int groups = 1
):
N(N), H(H), W(W), C(C), K(K), R(R), S(S), P(P), Q(Q),
pad_h(pad_h), pad_w(pad_w), stride_h(stride_h), stride_w(stride_w),
dilation_h(dilation_h), dilation_w(dilation_w),
mode(mode), split_k_slices(split_k_slices), groups (groups) { }
/// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord
// set user-defined output size and sets P and Q (include all data members in ctor)
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
cutlass::Tensor4DCoord input_size, // NHWC
cutlass::Tensor4DCoord filter_size, // KRSC
cutlass::Tensor4DCoord padding, // pad_h, _, pad_w, _
cutlass::MatrixCoord stride, // stride_h, stride_w
cutlass::MatrixCoord dilation, // dilation_h, dilation_w
cutlass::Tensor4DCoord output_size, // NPQK
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()),
K(filter_size.n()), R(filter_size.h()), S(filter_size.w()),
pad_h(padding[0]), pad_w(padding[2]),
stride_h(stride.row()), stride_w(stride.column()),
dilation_h(dilation.row()), dilation_w(dilation.column()),
P(output_size.h()), Q(output_size.w()),
mode(mode), split_k_slices(split_k_slices), groups(groups) {}
/// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord
// computes output size and sets P and Q (skip output from ctor arguments)
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
cutlass::Tensor4DCoord input_size, // NHWC
cutlass::Tensor4DCoord filter_size, // KRSC
cutlass::Tensor4DCoord padding, // pad_h, _, pad_w, _
cutlass::MatrixCoord stride, // stride_h, stride_w
cutlass::MatrixCoord dilation, // dilation_h, dilation_w
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()),
K(filter_size.n()), R(filter_size.h()), S(filter_size.w()),
pad_h(padding[0]), pad_w(padding[2]),
stride_h(stride.row()), stride_w(stride.column()),
dilation_h(dilation.row()), dilation_w(dilation.column()),
mode(mode), split_k_slices(split_k_slices), groups(groups) {
// set output P and Q
P = ((H + pad_h * 2 - R * dilation_h) / stride_h) + 1;
Q = ((W + pad_w * 2 - S * dilation_w) / stride_w) + 1;
}
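  // Illustrative worked example of the output-size formula above (the concrete values are
  // hypothetical): for H = W = 224, R = S = 7, pad_h = pad_w = 3, stride_h = stride_w = 2, and
  // unit dilation,
  //   P = ((224 + 3 * 2 - 7 * 1) / 2) + 1 = (223 / 2) + 1 = 111 + 1 = 112
  // and likewise Q = 112 (integer division truncates).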
/// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord
// set user-defined output size and sets P and Q (skip padding, striding, and dilation)
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
cutlass::Tensor4DCoord input_size, // NHWC
cutlass::Tensor4DCoord filter_size, // KRSC
cutlass::Tensor4DCoord output_size, // NPQK
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()),
K(filter_size.n()), R(filter_size.h()), S(filter_size.w()),
P(output_size.h()), Q(output_size.w()),
pad_h(R / 2), pad_w(S / 2), stride_h(1), stride_w(1),
dilation_h(1), dilation_w(1),
mode(mode), split_k_slices(split_k_slices), groups(groups) {}
  // Reset convolution mode in the problem
CUTLASS_HOST_DEVICE
Conv2dProblemSize reset_mode(cutlass::conv::Mode mode_) {
Conv2dProblemSize tmp(*this);
tmp.mode = mode_;
return tmp;
}
  // Reset split-K slices in the problem
CUTLASS_HOST_DEVICE
Conv2dProblemSize reset_split_k_slices(int split_k_slices_) {
Conv2dProblemSize tmp(*this);
tmp.split_k_slices = split_k_slices_;
return tmp;
}
/// Equality operator (ignores mode and split_k_slice)
CUTLASS_HOST_DEVICE
bool operator==(Conv2dProblemSize const &conv) const {
return (
(N == conv.N) && (H == conv.H) && (W == conv.W) && (C == conv.C) &&
(K == conv.K) && (R == conv.R) && (S == conv.S) &&
(P == conv.P) && (Q == conv.Q) &&
(pad_h == conv.pad_h) && (pad_w == conv.pad_w) &&
(stride_h == conv.stride_h) && (stride_w == conv.stride_w) &&
(dilation_h == conv.dilation_h) && (dilation_w == conv.dilation_w)
);
}
/// Inequality operator
CUTLASS_HOST_DEVICE
bool operator!=(Conv2dProblemSize const &rhs) const {
return !(*this == rhs);
}
/// Returns activation extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord activation_extent() const {
return cutlass::Tensor4DCoord ({N, H, W, C});
}
/// Returns filter extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord filter_extent() const {
return cutlass::Tensor4DCoord ({K, R, S, C / groups});
}
/// Returns output extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord output_extent() const {
return cutlass::Tensor4DCoord ({N, P, Q, K});
}
/// Returns activation size in number of elements
CUTLASS_HOST_DEVICE
int64_t activation_size() const {
return (N * H * W * C);
}
/// Returns filter size in number of elements
CUTLASS_HOST_DEVICE
int64_t filter_size() const {
return (K * R * S * C / groups);
}
/// Returns output size in number of elements
CUTLASS_HOST_DEVICE
int64_t output_size() const {
return (N * P * Q * K);
}
/// Returns padding as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord padding() const {
return cutlass::Tensor4DCoord ({pad_h, pad_h, pad_w, pad_w});
}
/// Returns stride as MatrixCoord
CUTLASS_HOST_DEVICE
cutlass::MatrixCoord stride() const {
return cutlass::MatrixCoord ({stride_h, stride_w});
}
/// Returns dilation as MatrixCoord
CUTLASS_HOST_DEVICE
cutlass::MatrixCoord dilation() const {
return cutlass::MatrixCoord ({dilation_h, dilation_w});
}
/////////////////////////////////////////////////////////////////
// Methods used for strided dgrad implementation
/////////////////////////////////////////////////////////////////
/// Number of filter r positions to accumulate in gemm-k dim
CUTLASS_HOST_DEVICE
int num_gemm_k_filter_r(int r) const {
return ((R - r + stride_h - 1) / stride_h);
}
/// Number of filter s positions to accumulate in gemm-k dim
CUTLASS_HOST_DEVICE
int num_gemm_k_filter_s(int s) const {
return ((S - s + stride_w - 1) / stride_w);
}
/// Number of filter positions to accumulate in gemm-k dim
CUTLASS_HOST_DEVICE
int num_gemm_k_filter_positions(int r, int s) const {
return num_gemm_k_filter_r(r) * num_gemm_k_filter_s(s);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// ImplicitGemm helper functions //
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Determine the problem size of the implicit GEMM operation
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord implicit_gemm_problem_size(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
// Compute problem size
switch (conv_operator) {
case Operator::kFprop:
return gemm::GemmCoord(
problem_size.N * problem_size.P * problem_size.Q,
problem_size.K,
problem_size.R * problem_size.S * problem_size.C / problem_size.groups
);
case Operator::kDgrad:
return gemm::GemmCoord(
problem_size.N * problem_size.H * problem_size.W,
problem_size.C,
problem_size.R * problem_size.S * problem_size.K
);
case Operator::kWgrad:
return gemm::GemmCoord(
problem_size.K,
problem_size.R * problem_size.S * problem_size.C,
problem_size.N * problem_size.P * problem_size.Q
);
default:
break;
}
return gemm::GemmCoord();
}
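/// Illustrative usage sketch: builds a small Fprop problem and maps it to its implicit GEMM shape.
/// The function name and the concrete extents below are hypothetical examples and are not part of
/// the CUTLASS API.
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord example_fprop_implicit_gemm_shape() {
  Conv2dProblemSize problem(
    cutlass::Tensor4DCoord(1, 56, 56, 64),   // input size (NHWC)
    cutlass::Tensor4DCoord(64, 3, 3, 64),    // filter size (KRSC)
    cutlass::Tensor4DCoord(1, 1, 1, 1),      // padding (pad_h, _, pad_w, _)
    cutlass::MatrixCoord(1, 1),              // stride (stride_h, stride_w)
    cutlass::MatrixCoord(1, 1));             // dilation (dilation_h, dilation_w)
  // The constructor computes P = Q = 56, so kFprop maps to
  // GEMM (M, N, K) = (N*P*Q, K, R*S*C) = (3136, 64, 576).
  return implicit_gemm_problem_size(Operator::kFprop, problem);
}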
// Determine the number of gemm_k iterations for a conv2d problem using the implicit gemm algorithm
CUTLASS_HOST_DEVICE
int implicit_gemm_k_iterations(
Operator conv_operator,
int threadblock_K,
Conv2dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic,
GroupMode group_mode = GroupMode::kNone,
int threadblock_N = 0) {
int iterations = 0;
if (group_mode == GroupMode::kNone) {
if (algorithm == IteratorAlgorithm::kFixedChannels) {
int positions_per_iteration = threadblock_K / problem_size.C;
switch (conv_operator) {
case Operator::kFprop:
iterations = (problem_size.R * problem_size.S + positions_per_iteration - 1 ) / positions_per_iteration;
break;
default:
break;
}
}
else if (algorithm == IteratorAlgorithm::kFewChannels) {
switch (conv_operator) {
case Operator::kFprop:
iterations = (problem_size.R * problem_size.S * problem_size.C + threadblock_K - 1 ) / threadblock_K;
break;
default:
break;
}
}
else {
int elements_per_split_k_slice = 0;
switch (conv_operator) {
case Operator::kFprop:
elements_per_split_k_slice = (problem_size.C + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kDgrad:
elements_per_split_k_slice = (problem_size.K + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kWgrad:
elements_per_split_k_slice = (problem_size.N * problem_size.P * problem_size.Q + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = (elements_per_split_k_slice + threadblock_K - 1) / threadblock_K;
break;
default:
break;
}
}
} else if (group_mode == GroupMode::kDepthwise) {
int channels_per_cta = threadblock_N;
if (algorithm == IteratorAlgorithm::kAnalytic) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.R * problem_size.S *
((channels_per_cta + threadblock_K - 1) / threadblock_K);
break;
default:
break;
}
}
} else { // Group conv
int channels_per_group = problem_size.C / problem_size.groups;
int k_per_group = problem_size.K / problem_size.groups;
if (algorithm == IteratorAlgorithm::kAnalytic) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.R * problem_size.S * ((channels_per_group + threadblock_K - 1) / threadblock_K);
// In group conv, if k_per_group < threadblock_N, one Threadblock will calculate multiple groups
if (problem_size.groups != 1) {
if (k_per_group < threadblock_N) {
iterations *= threadblock_N / k_per_group;
}
}
break;
default:
break;
}
} else if (algorithm == IteratorAlgorithm::kOptimized) {
// Current optimized iterator only support GroupMode::kSingleGroup
if (group_mode == GroupMode::kSingleGroup) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.R * problem_size.S * ((channels_per_group + threadblock_K - 1) / threadblock_K);
break;
default:
break;
}
}
}
}
return iterations;
}
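// Illustrative worked example for implicit_gemm_k_iterations above (the concrete values are
// hypothetical): for kFprop with the default kAnalytic algorithm, GroupMode::kNone,
// threadblock_K = 64, C = 64, R = S = 3, and split_k_slices = 1,
//   elements_per_split_k_slice = 64 and iterations = 3 * 3 * ((64 + 63) / 64) = 9.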
template <int N = 1, int Output_P = 1, int Output_Q = 1>
CUTLASS_HOST_DEVICE
int depthwise_gemm_k_iterations(
Operator conv_operator,
int threadblock_K,
Conv2dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic,
GroupMode group_mode = GroupMode::kNone,
int threadblock_N = 0) {
int n = problem_size.N;
int p = (problem_size.P + Output_P - 1) / Output_P;
int q = (problem_size.Q + Output_Q - 1) / Output_Q;
int iterations = (n * p * q + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
return iterations;
}
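// Illustrative worked example for depthwise_gemm_k_iterations above (the concrete values are
// hypothetical): with the default Output_P = Output_Q = 1, N = 1, P = Q = 56, and
// split_k_slices = 1, iterations = (1 * 56 * 56) / 1 = 3136.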
CUTLASS_HOST_DEVICE
int implicit_gemm_k_iterations_per_channel(
Operator conv_operator,
int threadblock_K,
Conv2dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic) {
int iterations = 0; //0 means not applicable
if (algorithm == IteratorAlgorithm::kAnalytic || algorithm == IteratorAlgorithm::kOptimized) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.R * problem_size.S;
break;
case Operator::kDgrad:
iterations = problem_size.R * problem_size.S;
break;
default:
break;
}
}
return iterations;
}
////////////////////////////////////////////////////////////////////////////////
// Mapping function (ImplicitGemm A, B, C -> Conv Activation, Filter, Output)
////////////////////////////////////////////////////////////////////////////////
/// Returns ImplicitGemm tensor A extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord implicit_gemm_tensor_a_extent(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.output_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.output_extent();
default : break;
}
return cutlass::Tensor4DCoord();
}
/// Returns ImplicitGemm tensor B extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord implicit_gemm_tensor_b_extent(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.filter_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_extent();
default : break;
}
return cutlass::Tensor4DCoord();
}
/// Returns ImplicitGemm tensor C extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord implicit_gemm_tensor_c_extent(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.activation_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_extent();
default : break;
}
return cutlass::Tensor4DCoord();
}
/// Returns ImplicitGemm tensor A size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_a_size(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_size();
case cutlass::conv::Operator::kDgrad: return problem_size.output_size();
case cutlass::conv::Operator::kWgrad: return problem_size.output_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor B size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_b_size(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_size();
case cutlass::conv::Operator::kDgrad: return problem_size.filter_size();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor C size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_c_size(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_size();
case cutlass::conv::Operator::kDgrad: return problem_size.activation_size();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_size();
default : break;
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// Strided dgrad helper functions //
////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the number of CTA tiles in M needed to cover valid MMAs per starting filter position
CUTLASS_HOST_DEVICE
int strided_dgrad_tile_m_per_filter(
Conv2dProblemSize const &problem_size,
int tile_size_m) {
  // Compute NHW rows in Dx output that need MMA per starting filter position
int rows_h_per_filter = (problem_size.H + problem_size.stride_h - 1) / problem_size.stride_h;
int rows_w_per_filter = (problem_size.W + problem_size.stride_w - 1) / problem_size.stride_w;
int rows_nhw_per_filter = problem_size.N * rows_h_per_filter * rows_w_per_filter;
  // Number of CTA tiles in M needed to cover valid MMAs per starting filter position
int tile_m_per_filter = (rows_nhw_per_filter + tile_size_m - 1) / tile_size_m;
return tile_m_per_filter;
}
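// Illustrative worked example for strided_dgrad_tile_m_per_filter above (the concrete values are
// hypothetical): for N = 2, H = W = 28, stride_h = stride_w = 2, and tile_size_m = 64, each
// starting filter position needs 2 * 14 * 14 = 392 NHW rows of Dx output, so
// tile_m_per_filter = (392 + 63) / 64 = 7.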
// Computes the starting Dx coord (h, w) for a given starting filter position
CUTLASS_HOST_DEVICE
void strided_dgrad_starting_coords(
Conv2dProblemSize const &problem_size,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int r, int s,
int &start_h, int &start_w) {
// function locals for remainder by fast divmod
int pad_h_rem_, pad_w_rem_;
// start_h = std::abs(problem_size.stride_h - ((problem_size.pad_h % problem_size.stride_h) - r)) % problem_size.stride_h;
stride_h_divmod.divmod(pad_h_rem_, problem_size.pad_h);
int r_ = absolute_value(problem_size.stride_h - (pad_h_rem_ - r));
stride_h_divmod.divmod(start_h, r_);
//start_w = std::abs(problem_size.stride_w - ((problem_size.pad_w % problem_size.stride_w) - s)) % problem_size.stride_w;
stride_w_divmod.divmod(pad_w_rem_, problem_size.pad_w);
int s_ = absolute_value(problem_size.stride_w - (pad_w_rem_ - s));
stride_w_divmod.divmod(start_w, s_);
}
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 22,725 | C | 33.80245 | 152 | 0.623982 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/conv3d_problem_size.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains definitions and utility functions for describing convolution problem sizes.
    Conv3dProblem description:
activation (NDHWC),
filter (KTRSC),
output (NZPQK),
      padding (pad_d, pad_h, pad_w),
stride (stride_d, stride_h, stride_w),
dilation (dilation_d, dilation_h, dilation_w).
Free functions to map:
Map tensor extents (Conv3d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_extent(ConvolutionOperator)
Map tensor sizes (Conv3d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_size(ConvolutionOperator)
Map tensor problem sizes (Conv3d -> ImplicitGemm): implicit_gemm_problem_size(ConvolutionOperator)
*/
#pragma once
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Problem size structure
struct Conv3dProblemSize : public Conv2dProblemSize {
//
// Type definitions
//
// 3D coordinate for padding, stride, and dilation in (d, h, w) dimensions
using Coord3D = Coord<3>;
//
// Data members
//
  // Parameters that strictly define the Conv3d problem size
int D, T, Z; // input depth, filter depth, output depth
int pad_d; // padding in depth dimension
int stride_d; // stride in depth dimension
int dilation_d; // dilation in depth dimension
//
// Methods
//
public:
CUTLASS_HOST_DEVICE
Conv3dProblemSize():
D(0), T(0), Z(0),
pad_d(0),
stride_d(1),
dilation_d(1),
Conv2dProblemSize() { }
/// Constructor for default padding, stride, dilation, and split-K
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
int N,
int D,
int H,
int W,
int C,
int Z,
int P,
int Q,
int K,
int T,
int R,
int S,
Mode mode
):
D(D), T(T), Z(Z),
pad_d(T / 2), stride_d(1), dilation_d(1),
Conv2dProblemSize(N, H, W, C, P, Q, K, R, S, mode) { }
/// Constructor
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
int N,
int D,
int H,
int W,
int C,
int K,
int T,
int R,
int S,
int Z,
int P,
int Q,
int pad_d,
int pad_h,
int pad_w,
int stride_d,
int stride_h,
int stride_w,
int dilation_d,
int dilation_h,
int dilation_w,
Mode mode,
int split_k_slices = 1,
int groups = 1
):
D(D), T(T), Z(Z),
pad_d(pad_d), stride_d(stride_d), dilation_d(dilation_d),
Conv2dProblemSize(
N, H, W, C, K, R, S, P, Q,
pad_h, pad_w,
stride_h, stride_w,
dilation_h, dilation_w,
mode, split_k_slices, groups) { }
/// Constructs convolution problem size from cutlass Tensor5DCoord and Coord3D
// set *user-defined* output size and sets Z, P, and Q (include all data members in ctor)
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
cutlass::Tensor5DCoord input_size, // NDHWC
cutlass::Tensor5DCoord filter_size, // KTRSC
Coord3D padding, // pad_d, pad_h, pad_w
Coord3D stride, // stride_d, stride_h, stride_w
Coord3D dilation, // dilation_d, dilation_h, dilation_w
cutlass::Tensor5DCoord output_size, // NZPQK
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
D(input_size.d()), T(filter_size.d()), Z(output_size.d()),
pad_d(padding[0]), stride_d(stride[0]), dilation_d(dilation[0]),
Conv2dProblemSize(
{input_size.n(), input_size.h(), input_size.w(), input_size.c()},
{filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()},
{padding[1], padding[1], padding[2], padding[2]},
{stride[1], stride[2]},
{dilation[1], dilation[2]},
{output_size.n(), output_size.h(), output_size.w(), output_size.c()},
mode, split_k_slices, groups
) { }
/// Constructs convolution problem size from cutlass Tensor5DCoord and Coord3D
// *computes* output size and sets Z, P and Q (include all data members in ctor)
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
cutlass::Tensor5DCoord input_size, // NDHWC
cutlass::Tensor5DCoord filter_size, // KTRSC
Coord3D padding, // pad_d, pad_h, pad_w
Coord3D stride, // stride_d, stride_h, stride_w
Coord3D dilation, // dilation_d, dilation_h, dilation_w
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
D(input_size.d()), T(filter_size.d()),
pad_d(padding[0]), stride_d(stride[0]), dilation_d(dilation[0]),
Conv2dProblemSize(
{input_size.n(), input_size.h(), input_size.w(), input_size.c()},
{filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()},
{padding[1], padding[1], padding[2], padding[2]},
{stride[1], stride[2]},
{dilation[1], dilation[2]},
mode, split_k_slices, groups
) {
// set output Z
Z = ((D + pad_d * 2 - T * dilation_d) / stride_d) + 1;
}
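  // Illustrative worked example of the output-depth formula above (the concrete values are
  // hypothetical): for D = 16, T = 3, pad_d = 1, stride_d = 1, and dilation_d = 1,
  //   Z = ((16 + 1 * 2 - 3 * 1) / 1) + 1 = 15 + 1 = 16.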
/// Equality operator (ignores mode and split_k_slice)
CUTLASS_HOST_DEVICE
bool operator==(Conv3dProblemSize const &conv) const {
return (
(N == conv.N) && (D == conv.D) && (H == conv.H) && (W == conv.W) && (C == conv.C) &&
(K == conv.K) && (T == conv.T) && (R == conv.R) && (S == conv.S) &&
(Z == conv.Z) &&(P == conv.P) && (Q == conv.Q) &&
(pad_d == conv.pad_d) && (pad_h == conv.pad_h) && (pad_w == conv.pad_w) &&
(stride_d == conv.stride_d) && (stride_h == conv.stride_h) && (stride_w == conv.stride_w) &&
(dilation_d == conv.dilation_d) && (dilation_h == conv.dilation_h) && (dilation_w == conv.dilation_w)
);
}
/// Inequality operator
CUTLASS_HOST_DEVICE
bool operator!=(Conv3dProblemSize const &rhs) const {
return !(*this == rhs);
}
  // Reset convolution mode in the problem
CUTLASS_HOST_DEVICE
Conv3dProblemSize reset_mode(cutlass::conv::Mode mode_) {
Conv3dProblemSize tmp(*this);
tmp.mode = mode_;
return tmp;
}
  // Reset split-K slices in the problem
CUTLASS_HOST_DEVICE
Conv3dProblemSize reset_split_k_slices(int split_k_slices_) {
Conv3dProblemSize tmp(*this);
tmp.split_k_slices = split_k_slices_;
return tmp;
}
/// Returns activation extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord activation_extent() const {
return cutlass::Tensor5DCoord ({N, D, H, W, C});
}
/// Returns filter extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord filter_extent() const {
return cutlass::Tensor5DCoord ({K, T, R, S, C});
}
/// Returns output extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord output_extent() const {
return cutlass::Tensor5DCoord ({N, Z, P, Q, K});
}
/// Returns activation size in number of elements
CUTLASS_HOST_DEVICE
int64_t activation_size() const {
return (N * D * H * W * C);
}
/// Returns filter size in number of elements
CUTLASS_HOST_DEVICE
int64_t filter_size() const {
return (K * T * R * S * C);
}
/// Returns output size in number of elements
CUTLASS_HOST_DEVICE
int64_t output_size() const {
return (N * Z * P * Q * K);
}
  /// Returns padding as Coord3D
CUTLASS_HOST_DEVICE
Coord3D padding() const {
return Coord3D ({pad_d, pad_h, pad_w});
}
  /// Returns stride as Coord3D
CUTLASS_HOST_DEVICE
Coord3D stride() const {
return Coord3D ({stride_d, stride_h, stride_w});
}
  /// Returns dilation as Coord3D
CUTLASS_HOST_DEVICE
Coord3D dilation() const {
return Coord3D ({dilation_d, dilation_h, dilation_w});
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// ImplicitGemm helper functions //
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Determine the problem size of the implicit GEMM operation
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord implicit_gemm_problem_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
// Compute problem size
switch (conv_operator) {
case Operator::kFprop:
return gemm::GemmCoord(
problem_size.N * problem_size.Z * problem_size.P * problem_size.Q,
problem_size.K,
problem_size.T * problem_size.R * problem_size.S * problem_size.C
);
case Operator::kDgrad:
return gemm::GemmCoord(
problem_size.N * problem_size.D * problem_size.H * problem_size.W,
problem_size.C,
problem_size.T * problem_size.R * problem_size.S * problem_size.K
);
case Operator::kWgrad:
return gemm::GemmCoord(
problem_size.K,
problem_size.T * problem_size.R * problem_size.S * problem_size.C,
problem_size.N * problem_size.Z * problem_size.P * problem_size.Q
);
default:
break;
}
return gemm::GemmCoord();
}
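// Illustrative worked example for implicit_gemm_problem_size above (the concrete values are
// hypothetical): for kFprop with N = 1, Z = P = Q = 8, K = 32, T = R = S = 3, and C = 32, the
// implicit GEMM shape is (M, N, K) = (N*Z*P*Q, K, T*R*S*C) = (512, 32, 864).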
// Determine the number of gemm_k iterations for a conv3d problem using the implicit gemm algorithm
CUTLASS_HOST_DEVICE
int implicit_gemm_k_iterations(
Operator conv_operator,
int threadblock_K,
Conv3dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic,
GroupMode group_mode = GroupMode::kNone,
int threadblock_N = 0) {
int iterations = 0;
int elements_per_split_k_slice = 0;
if (group_mode == GroupMode::kNone) {
switch (conv_operator) {
case Operator::kFprop:
elements_per_split_k_slice = (problem_size.C + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.T * problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kDgrad:
elements_per_split_k_slice = (problem_size.K + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.T * problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kWgrad:
elements_per_split_k_slice = (problem_size.N * problem_size.Z * problem_size.P * problem_size.Q + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = (elements_per_split_k_slice + threadblock_K - 1) / threadblock_K;
break;
default:
break;
}
} else if (group_mode == GroupMode::kDepthwise) {
int channels_per_cta = threadblock_N;
if (algorithm == IteratorAlgorithm::kAnalytic) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.T * problem_size.R * problem_size.S *
((channels_per_cta + threadblock_K - 1) / threadblock_K);
break;
default:
break;
}
}
}
return iterations;
}
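// Illustrative worked example for implicit_gemm_k_iterations above (the concrete values are
// hypothetical): for kFprop with GroupMode::kNone, threadblock_K = 32, C = 32, T = R = S = 3, and
// split_k_slices = 1, elements_per_split_k_slice = 32 and
// iterations = 3 * 3 * 3 * ((32 + 31) / 32) = 27.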
////////////////////////////////////////////////////////////////////////////////
// Mapping function (ImplicitGemm A, B, C -> Conv Activation, Filter, Output)
////////////////////////////////////////////////////////////////////////////////
/// Returns ImplicitGemm tensor A extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_a_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.output_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.output_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor B extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_b_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.filter_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor C extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_c_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.activation_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor A size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_a_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_size();
case cutlass::conv::Operator::kDgrad: return problem_size.output_size();
case cutlass::conv::Operator::kWgrad: return problem_size.output_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor B size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_b_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_size();
case cutlass::conv::Operator::kDgrad: return problem_size.filter_size();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor C size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_c_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_size();
case cutlass::conv::Operator::kDgrad: return problem_size.activation_size();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_size();
default : break;
}
return 0;
}
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 16,292 | C | 33.085774 | 169 | 0.624969 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/convolution.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
This file contains definitions and utility functions for describing convolution problem sizes in terms of
    activation (NHWC), filter (KRSC), output (NPQK), padding (pad_h, pad_w), stride (stride_h, stride_w),
    dilation (dilation_h, dilation_w). Furthermore, it defines helper functions to map cutlass' implicit gemm
    tensor extents, sizes, and data types to those of the convolution extents, sizes, and data types.
* Mapping convolutions to Gemm computation *
    Cutlass employs the ImplicitGemm algorithm to implement convolutions. ImplicitGemm runs a gemm operation
    on the convolution tensors Activation, Filter, and Output. The underlying gemm operation follows the standard
gemm definition:
C = A * B + C
A and B are input matrices
C is source and output matrix
    For the three convolutional operators (Fprop, Dgrad, Wgrad), ImplicitGemm matrices A, B, and C are mapped
    onto the convolution tensors Activation, Filter, and Output as per the table below:
___________________________________________________________________________
ConvolutionalOperator | A | B | C
___________________________________________________________________________
| | | | |
| Fprop | Activation | Filter | Output |
| Dgrad | Output | Filter | Activation |
| Wgrad | Output | Activation | Filter |
___________________________________________________________________________
    In the convolution codebase, DO NOT mix using (A, B, C) with (Activation, Filter, Output).
    For example, a convolution class/function with A, B, Output is confusing and error-prone. Instead, use the
    mapping functions below and adhere to using either A, B, C or Activation, Filter, Output.
Map elements' data types (ImplicitGemm -> Conv): GemmToConvElementMap
Map elements' data types (Conv -> ImplicitGemm): ConvToGemmElementMap
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Convolutional operator
enum class Operator {
kFprop,
kDgrad,
kWgrad
};
/// Distinguishes convolution from cross correlation
enum class Mode {
kCrossCorrelation,
kConvolution
};
/// Selects among several implementation variants trading off performance with simplicity
enum class IteratorAlgorithm {
kAnalytic, ///< functionally correct in all cases but lower performance
kOptimized, ///< optimized for R <= 32, S <= 32 and unity-stride dgrad
kFixedChannels, ///< Analytic algorithm optimized for fixed channel count (C == AccessSize)
kFewChannels, ///< Analytic algorithm optimized for few channels (C divisible by AccessSize)
kFixedStrideDilation ///< Optimized for fixed stride and dilation
};
/// Distinguishes among partial specializations that accelerate certain problems where convolution
/// stride is unit.
enum class StrideSupport {
kStrided, ///< arbitrary convolution stride
kUnity, ///< unit convolution stride
kFixed ///< fixed convolution stride
};
/// Identifies split-K mode
enum class SplitKMode {
kNone,
kSerial,
kParallel
};
/// Identifies group mode
enum class GroupMode {
kNone,
kSingleGroup, ///< One CTA calculates one group or less
kMultipleGroup, ///< One CTA calculates multiple groups
kDepthwise ///< One CTA calculates cta_n groups (problem_size.C == problem_size.K == problem_size.groups)
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Shape of a tensor
template <
int N = 1,
int H = 1,
int W = 1,
int C = 1
>
struct TensorNHWCShape {
static int const kN = N;
static int const kH = H;
static int const kW = W;
static int const kC = C;
static int const kHW = H * W;
static int const kNHW = N * kHW;
static int const kNHWC = N * H * W * C;
static int const kCount = kNHWC;
//
// Static member functions
//
/// Returns a Coord object
CUTLASS_HOST_DEVICE
static Coord<4> toCoord() {
return make_Coord(kN, kH, kW, kC);
}
};
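/// Illustrative sketch: a compile-time NHWC shape alias with hypothetical extents (the alias name
/// and extents are examples only, not part of the CUTLASS API); kCount is the product N * H * W * C.
using ExampleThreadBlockOutputShape = TensorNHWCShape<1, 8, 8, 64>;
static_assert(ExampleThreadBlockOutputShape::kCount == 1 * 8 * 8 * 64,
  "TensorNHWCShape<N, H, W, C>::kCount equals N * H * W * C");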
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 6,664 | C | 38.672619 | 112 | 0.596789 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Describes the lane policy used by warp-level matrix multiply operators targeting SIMT
instructions
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over operands to warp-level matrix multiply operations targeting SIMT instructions
///
/// concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
cutlass::gemm::Operand Operand,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK = 1,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize = 1
>
class DepthwiseMmaSimtTileIterator;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for B operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseMmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize>
: public cutlass::gemm::warp::MmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize> {
using Base = cutlass::gemm::warp::MmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize>;
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kB;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = typename Base::TensorRef;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Thread-level shape of a fragment
using ThreadShape = typename Base::ThreadShape;
/// Number of individual loads
using Iterations = typename Base::Iterations;
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
static_assert(Policy::LaneMmaShape::kN == 1, "Each thread should be 1 element per LDS along the k-dim");
private:
MatrixCoord lane_offset_;
int channel_idx_;
int base_channel_idx_;
int warps_n_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator():Base() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator(
TensorRef ref,
int lane_id
) : Base(ref, lane_id) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
warps_n_ = -1;
channel_idx_ = 0;
base_channel_idx_ = 0;
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
if(warps_n_ == -1){
warps_n_ = coord.column();
}
Base::add_tile_offset(coord);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
void const *ptr = this->ref_.data() +
this->ref_.offset({-(channel_idx_ - base_channel_idx_),
n * Policy::WarpShape::kColumn}) +
pointer_offset / Policy::LaneMmaShape::kN;
        // Base k of the warp plus the base k owned by the current thread.
int thread_k_base_idx =
warps_n_ * Shape::kColumn / Policy::LaneMmaShape::kN + lane_offset_.column();
if (channel_idx_ + k == thread_k_base_idx + n * Policy::WarpShape::kColumn) {
          // The depthwise kernel only performs computation when channel == k.
          // Load an element only when the current computation channel equals the k assigned to this thread.
arch::shared_load(dst_ptr[n + k * Iterations::kColumn], ptr);
} else {
          // Skip the shared-memory load and zero-fill this fragment slot instead.
dst_ptr[n + k * Iterations::kColumn].fill(Element(0));
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
if(k_group % PartitionGroupSize == 0 && k_group != 0){
base_channel_idx_ = k_group;
}
channel_idx_ = k_group;
}
};
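// Illustrative note on the gating in load_with_pointer_offset() above, using
// hypothetical numbers: take Shape::kColumn == 8, Policy::WarpShape::kColumn == 4,
// Policy::LaneMmaShape::kN == 1, warps_n_ == 0 and a lane with
// lane_offset_.column() == 2, so thread_k_base_idx == 2. After set_kgroup_index(2),
// only the access with (k == 0, n == 0) satisfies
// channel_idx_ + k == thread_k_base_idx + n * Policy::WarpShape::kColumn
// (2 + 0 == 2 + 0 * 4); that slot is read from shared memory and every other
// fragment slot is zero-filled, which restricts the depthwise product to the
// thread's own channel.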
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: MatrixShape)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: MatrixShape)
typename ThreadBlockOutputShape_,
/// Operand identity
cutlass::gemm::Operand Operand,
    /// Data type of elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock
typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK = 1,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize = 1>
class DepthwiseDirect2dConvSimtTileIterator;
/// Specialization for A operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadBlockOutputShape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape,
/// Activation Shape loaded by threadblock
typename ActivationShape,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseDirect2dConvSimtTileIterator<Shape_,
FilterShape_,
ThreadOutputShape_,
ThreadBlockOutputShape_,
cutlass::gemm::Operand::kA,
Element_,
Policy_,
IteratorAlgorithm,
StrideShape,
DilationShape,
ActivationShape,
PartitionsK,
PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of filter (concept: gemm::GemmShape<Depth, Height, Width>)
using FilterShape = FilterShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadOutputShape = ThreadOutputShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
    ThreadOutputShape::kNHW, // Output tile shape computed by the current thread
ThreadOutputShape::kC
>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow,
ThreadShape::kColumn / Policy::LaneMmaShape::kN
>;
using ThreadTileCount = MatrixShape<
ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
protected:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_;
int activation_offset[ThreadOutputShape::kH][ThreadOutputShape::kW][Iterations::kColumn];
int iterator_r_;
int iterator_s_;
int iterator_offset_;
int inc_next_s_ ;
int inc_next_r_ ;
MatrixCoord lane_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator(
TensorRef ref,
int lane_id
) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
// Set channel offset
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
ref.add_coord_offset(lane_offset_);
ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kN);
iterator_r_ = 0;
iterator_s_ = 0;
iterator_offset_ = 0;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
  /// Precomputes the per-output activation offsets used by subsequent loads.
template<typename Params>
CUTLASS_HOST_DEVICE
void setup_initial_status(Params const& params) {
inc_next_s_ = params.inc_next[0];
inc_next_r_ = params.inc_next[1];
// Get base HW offset of current threads
int threadgroup = threadIdx.x / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
int base_p_ =
(threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH;
int base_q_ =
(threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW;
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int base_w = (base_q_ + q) * params.stride[0];
int base_h = (base_p_ + p) * params.stride[1];
int offset = base_h * params.activation_tile_w + base_w;
activation_offset[p][q][col] = offset;
}
}
}
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
// Set warp row and col start
lane_offset_ = MatrixCoord({lane_offset_.row() + coord.row() * Shape::kRow, lane_offset_.column()});
return *this;
}
  /// Advances the internal pointer by a raw byte offset and resets the filter position.
CUTLASS_HOST_DEVICE
void advance(int32_t pointer_offset) {
ref_.reset(ref_.data() + pointer_offset / sizeof(Element) / Policy::LaneMmaShape::kN);
iterator_s_ = 0;
iterator_r_ = 0;
iterator_offset_ = 0;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator++() {
++iterator_s_;
if (iterator_s_ < FilterShape::kColumn) {
iterator_offset_ += inc_next_s_;
return *this;
}
iterator_s_ = 0;
++iterator_r_;
if (iterator_r_ < FilterShape::kRow) {
iterator_offset_ += inc_next_r_;
return *this;
}
iterator_r_ = 0;
iterator_offset_ = 0;
return *this;
}
  /// Decrements the iterator (no-op for this iterator).
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator & operator--() {
// Do nothing
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
void const *ptr = ref_.data() +
ref_.offset({activation_offset[p][q][n] + (iterator_offset_),
n * Policy::WarpShape::kColumn}) +
pointer_offset / Policy::LaneMmaShape::kN;
arch::shared_load(dst_ptr[n + q + p * ThreadOutputShape::kW], ptr);
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
// Do nothing at present.
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, Index pointer_offset) const {
store_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
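// Worked example for the offset precomputation in setup_initial_status() above
// (hypothetical values): with ThreadOutputShape 2x2, params.stride == {1, 1},
// params.activation_tile_w == 8 and a thread group whose base output position is
// (base_p_, base_q_) == (0, 2), the entry for (p, q) == (1, 1) becomes
//   base_w = (2 + 1) * 1 = 3,  base_h = (0 + 1) * 1 = 1,
//   activation_offset[1][1][col] = 1 * 8 + 3 = 11,
// i.e. the linearized position of that output's anchor activation inside the
// staged activation tile; operator++ then adds inc_next_s_ / inc_next_r_ to
// iterator_offset_ to walk the filter taps around it.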
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for A operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadBlockOutputShape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_,
/// Activation Shape loaded by threadblock
typename ActivationShape_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseDirect2dConvSimtTileIterator<Shape_,
FilterShape_,
ThreadOutputShape_,
ThreadBlockOutputShape_,
cutlass::gemm::Operand::kA,
Element_,
Policy_,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape_,
DilationShape_,
ActivationShape_,
PartitionsK,
PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of filter (concept: gemm::GemmShape<Depth, Height, Width>)
using FilterShape = FilterShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadOutputShape = ThreadOutputShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
/// Stride ( MatrixShape<Height, Width> )
using StrideShape = StrideShape_;
/// Dilation ( MatrixShape<Height, Width> )
using DilationShape = DilationShape_;
/// Activation Shape loaded by threadblock
using ActivationShape = ActivationShape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged "
"along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0,
"Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
// Activations loaded by threadblock
static int const ThreadActivationShapeH = (ThreadOutputShape::kH - 1) * StrideShape::kRow +
(FilterShape::kRow - 1) * DilationShape::kRow + 1;
static int const ThreadActivationShapeW = (ThreadOutputShape::kW - 1) * StrideShape::kColumn +
(FilterShape::kColumn - 1) * DilationShape::kColumn + 1;
using ThreadActivationShape = cutlass::conv::
TensorNHWCShape<1, ThreadActivationShapeH, ThreadActivationShapeW, ThreadOutputShape::kC>;
// Thread-level shape of a fragment
using ThreadShape =
MatrixShape<ThreadOutputShape::kNHW,
ThreadOutputShape::kC>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations =
MatrixShape<ThreadShape::kRow, ThreadShape::kColumn / Policy::LaneMmaShape::kN>;
using ThreadTileCount = MatrixShape<ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
protected:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_;
Array<Element, Policy::LaneMmaShape::kN>
activation[ThreadActivationShape::kH][ThreadActivationShape::kW][Iterations::kColumn];
int iterator_r_;
int iterator_s_;
MatrixCoord lane_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator(TensorRef ref, int lane_id) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
// Set channel offset
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
ref.add_coord_offset(lane_offset_);
ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kN);
iterator_r_ = 0;
iterator_s_ = 0;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
  /// Precomputes thread offsets and preloads the activation tile from shared memory into registers.
template <typename Params>
CUTLASS_HOST_DEVICE void setup_initial_status(
Params const ¶ms) {
// Get base HW offset of current threads
int threadgroup = threadIdx.x / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
int base_h =
(threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH * StrideShape::kRow;
int base_w =
(threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW * StrideShape::kColumn;
CUTLASS_PRAGMA_UNROLL
for (int h = 0; h < ThreadActivationShape::kH; ++h) {
CUTLASS_PRAGMA_UNROLL
for (int w = 0; w < ThreadActivationShape::kW; ++w) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int offset = (base_h + h) * ActivationShape::kW + (base_w + w);
void const *ptr = ref_.data() + ref_.offset({offset, col * Policy::WarpShape::kColumn});
arch::shared_load(activation[h][w][col], ptr);
}
}
}
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
// Set warp row and col start
lane_offset_ =
MatrixCoord({lane_offset_.row() + coord.row() * Shape::kRow, lane_offset_.column()});
return *this;
}
  /// Advances the internal pointer by a raw byte offset and resets the filter position.
CUTLASS_HOST_DEVICE
void advance(int32_t pointer_offset) {
ref_.reset(ref_.data() + pointer_offset / sizeof(Element) / Policy::LaneMmaShape::kN);
iterator_s_ = 0;
iterator_r_ = 0;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator++() {
++iterator_s_;
if (iterator_s_ < FilterShape::kColumn) {
return *this;
}
iterator_s_ = 0;
++iterator_r_;
if (iterator_r_ < FilterShape::kRow) {
return *this;
}
iterator_r_ = 0;
return *this;
}
  /// Decrements the iterator (no-op for this iterator).
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator--() {
// Do nothing
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
const int h = p * StrideShape::kRow + iterator_r_ * DilationShape::kRow;
const int w = q * StrideShape::kColumn + iterator_s_ * DilationShape::kColumn;
dst_ptr[n + q + p * ThreadOutputShape::kW] = activation[h][w][n];
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); }
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
// Do nothing at present.
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, Index pointer_offset) const {
store_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
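// Illustrative register-indexing example for the specialization above
// (hypothetical values): with StrideShape 1x1, DilationShape 1x1, FilterShape 3x3
// and ThreadOutputShape 2x2, the preloaded tile has ThreadActivationShape 4x4
// ((2-1)*1 + (3-1)*1 + 1 = 4 in each direction). For output position
// (p, q) == (1, 1) at filter tap (iterator_r_, iterator_s_) == (2, 2),
// load_with_pointer_offset() reads activation[h][w] with
//   h = 1 * 1 + 2 * 1 = 3,  w = 1 * 1 + 2 * 1 = 3,
// so every filter tap is served from registers without touching shared memory again.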
} // namespace warp
} // namespace conv
} // namespace cutlass
| 30,655 | C | 34.522596 | 127 | 0.625999 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/warp/scale_bias_relu_transform.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level per channel scale+bias+relu before
matrix multiply-accumulate operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentScaleBias>
struct FpropScaleBiasReluTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumScaleBias = FragmentScaleBias::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 columns
static int const MmaCols = 2;
using MmaOperand = Array<T, MmaElements>;
using ScaleBiasOperand = Array<T, MmaElements * MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations, ScaleBiasOperand const &scale_bias) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations);
uint32_t const *ptr_scale_bias = reinterpret_cast<uint32_t const *>(&scale_bias);
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
    // We assume the pair of FP16 values are either both in-bounds or both out-of-bounds.
// It requires C to be an even number.
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" .reg .b32 t1;\n\t"
" setp.eq.u32 %%p, %2, %4;\n\t"
" fma.rn.f16x2.relu t1, %1, %2, %3;\n"
" selp.u32 %0, 0, t1, %%p;\n\t"
"}\n"
: "=r"(ptr_activations[0])
: "r"(ptr_scale_bias[0]), "r"(ptr_activations[0]),
"r"(ptr_scale_bias[1]), "n"(cutlass::arch::OOB_NAN_F16x2));
#else
// TODO: write emulation code
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentScaleBias const &scale_bias) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
ScaleBiasOperand const *ptr_scale_bias =
reinterpret_cast<ScaleBiasOperand const *>(&scale_bias);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i], ptr_scale_bias[(i / MmaScaleBiasPair) % MmaCols]);
}
}
};
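// Numeric sketch of the fprop transform above (hypothetical values): for a pair of
// activations x packed as f16x2, the PTX computes out = max(scale * x + bias, 0)
// per half, e.g. scale = 2, bias = -1 gives x = 3 -> 5 and x = 0.25 -> 0. If the
// packed pair equals the out-of-bounds sentinel OOB_NAN_F16x2 (both halves are the
// special NaN 0x7eff), selp forces the whole 32-bit result to zero instead.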
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentScaleBias>
struct WgradScaleBiasReluTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumScaleBias = FragmentScaleBias::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 rows
static int const MmaRows = 2;
using MmaOperand = Array<T, MmaElements>;
using ScaleBiasOperand = Array<__half2, MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations, ScaleBiasOperand const &scale_bias) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
__half2 *ptr_activations = reinterpret_cast<__half2 *>(&activations);
uint32_t const *ptr_scale_bias = reinterpret_cast<uint32_t const *>(&scale_bias);
#if 1
// CUDA + PTX version
bool h1_oob = (reinterpret_cast<uint16_t &>(ptr_activations[0].x) == cutlass::arch::OOB_NAN_F16);
bool h2_oob = (reinterpret_cast<uint16_t &>(ptr_activations[0].y) == cutlass::arch::OOB_NAN_F16);
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
    // We cannot guarantee that the pair of F16 values are both in-bounds or both
    // out-of-bounds because C x R x S can be an odd number.
asm volatile(
"{\n\t"
" fma.rn.f16x2.relu %0, %1, %2, %3;\n"
"}"
: "=r"(reinterpret_cast<uint32_t &>(ptr_activations[0]))
: "r"(ptr_scale_bias[0]), "r"(reinterpret_cast<uint32_t &>(ptr_activations[0])),
"r"(ptr_scale_bias[1]));
reinterpret_cast<uint32_t &>(ptr_activations[0]) = h1_oob ?
(reinterpret_cast<uint32_t &>(ptr_activations[0]) & 0xffff0000) :
reinterpret_cast<uint32_t &>(ptr_activations[0]);
reinterpret_cast<uint32_t &>(ptr_activations[0]) = h2_oob ?
(reinterpret_cast<uint32_t &>(ptr_activations[0]) & 0xffff) :
reinterpret_cast<uint32_t &>(ptr_activations[0]);
#else
// pure PTX version
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
asm volatile(
"{\n"
" .reg .b16 t1, t2;\n"
" .reg .b32 t3, t4, t5, t6;\n"
" .reg .pred p1, p2;\n"
" mov.b32 {t1, t2}, %2;\n"
" setp.eq.s16 p1, t1, %4;\n"
" setp.eq.s16 p2, t2, %4;\n"
" fma.rn.f16x2.relu t3, %1, %2, %3;\n"
" and.b32 t4, t3, %5;\n"
" selp.b32 t5, t4, t3, p1;\n"
" and.b32 t6, t5, %6;\n"
" selp.b32 %0, t6, t5, p2;\n"
"}\n"
: "=r"(reinterpret_cast<uint32_t &>(ptr_activations[0]))
: "r"(ptr_scale_bias[0]), "r"(reinterpret_cast<uint32_t &>(ptr_activations[0])),
"r"(ptr_scale_bias[1]), "n"(cutlass::arch::OOB_NAN_F16), "n"(0xffff0000), "n"(0x0000ffff));
#endif
#else
// TODO: write emulation code
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentScaleBias const &scale_bias) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
ScaleBiasOperand const *ptr_scale_bias =
reinterpret_cast<ScaleBiasOperand const *>(&scale_bias);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i], ptr_scale_bias[(i / MmaRows)]);
}
}
};
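// Unlike the fprop transform, the wgrad path above tests each half of the f16x2
// pair against the sentinel separately (h1_oob / h2_oob): the fused
// multiply-add + relu is computed for both halves, and the 16 bits belonging to an
// out-of-bounds half are then masked to zero, since gemm-K == C x R x S may be odd
// and the pair can straddle the boundary.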
} // namespace warp
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,772 | C | 38.165178 | 101 | 0.625627 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/thread/depthwise_mma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for depthwise convolution
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// MMA operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Data type of B elements
typename ElementB,
/// Element type of C matrix
typename ElementC,
/// Inner product operator
typename Operator
>
struct ElementwiseInnerProduct;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// General implementation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_>
struct ElementwiseInnerProduct<Shape_, 1, ElementA_, ElementB_, ElementC_, arch::OpMultiplyAdd> {
using Shape = Shape_;
using Operator = arch::OpMultiplyAdd;
using ElementC = ElementC_;
CUTLASS_HOST_DEVICE
void operator()(Array<ElementC_, Shape::kN> &d,
Array<ElementA_, Shape::kN> const &a,
Array<ElementB_, Shape::kN> const &b,
Array<ElementC_, Shape::kN> const &c) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Shape::kN; ++i) {
d[i] = a[i] * b[i] + c[i];
}
}
};
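// Usage sketch for the general implementation above (illustrative only; the element
// type and vector width are hypothetical):
//
//   using Op = cutlass::conv::thread::ElementwiseInnerProduct<
//       cutlass::gemm::GemmShape<1, 4, 1>, 1, float, float, float,
//       cutlass::arch::OpMultiplyAdd>;
//
//   cutlass::Array<float, 4> d, a, b, c;   // a, b, c initialized by the caller
//   Op()(d, a, b, c);                      // d[i] = a[i] * b[i] + c[i] for i in [0, 4)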
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of half_t
template <>
struct ElementwiseInnerProduct<
gemm::GemmShape<2, 2, 1>,
1,
half_t,
half_t,
half_t,
arch::OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = arch::OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 const & B = reinterpret_cast<__half2 const &>(b);
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 tmp_D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> const &>(tmp_D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[i] * b[i] + c[i];
}
#endif
}
};
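// The specialization above packs the two half_t lanes into a single __half2 so that
// one __hfma2 instruction produces both products when compiled for SM 6.0 or newer;
// older architectures (and host code) fall back to the scalar loop, which computes
// the same d[i] = a[i] * b[i] + c[i] result.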
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Data type of A elements
typename ElementA,
/// Data type of B elements
typename ElementB,
/// Element type of C matrix
typename ElementC,
/// Concept: arch::OpMultiplyAdd or arch::Mma<>
typename Operator = arch::OpMultiplyAdd,
/// Used for partial specialization
typename Enable = bool
>
struct DepthwiseDirectConvElementwiseInnerProduct;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles all packed matrix layouts
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_,
/// Operator used to compute GEMM
typename Operator_
>
struct DepthwiseDirectConvElementwiseInnerProductGeneric {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Underlying mathematical operator
using Operator = Operator_;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMN>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Instruction
using MmaOp = cutlass::conv::thread::ElementwiseInnerProduct<
gemm::GemmShape<Shape::kN, Shape::kN, 1>,
1,
ElementA,
ElementB,
ElementC,
Operator>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
Array<ElementC, Shape::kN> *ptr_D = reinterpret_cast<Array<ElementC, Shape::kN> *>(&D);
Array<ElementA, Shape::kN> const *ptr_A =
reinterpret_cast<Array<ElementA, Shape::kN> const *>(&A);
Array<ElementB, Shape::kN> const *ptr_B =
reinterpret_cast<Array<ElementB, Shape::kN> const *>(&B);
MmaOp mma_op;
// Copy accumulators
D = C;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN / MmaOp::Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
Array<ElementC, MmaOp::Shape::kN> tmpD = ptr_D[m * Shape::kN / MmaOp::Shape::kN + n];
Array<ElementA, MmaOp::Shape::kN> tmpA = ptr_A[m * Shape::kN / MmaOp::Shape::kN + n];
Array<ElementB, MmaOp::Shape::kN> tmpB = ptr_B[n];
mma_op(tmpD, tmpA, tmpB, tmpD);
ptr_D[m * Shape::kN / MmaOp::Shape::kN + n] = tmpD;
}
}
}
};
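// Layout note for the generic functor above: FragmentA and FragmentC hold
// Shape::kM output positions times Shape::kN channels per thread, while FragmentB
// holds a single row of Shape::kN filter values. The loop broadcasts that row
// across every output position, i.e. D[m][n] = A[m][n] * B[n] + C[m][n], which is
// exactly the per-channel (depthwise) multiply-accumulate.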
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_
>
struct DepthwiseDirectConvElementwiseInnerProduct<
Shape_,
ElementA_,
ElementB_,
ElementC_,
arch::OpMultiplyAdd
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA =
Array<ElementA, Shape::kMN>; // output_tile_size per thread * groups_per_thread
/// B operand storage
using FragmentB = Array<ElementB, Shape::kN>; // 1 * groups_per_thread
/// C operand storage
using FragmentC =
Array<ElementC, Shape::kMN>; // output_tile_size per thread * groups_per_thread
  static bool const use_optimized = false;
using ArchMmaOperator = DepthwiseDirectConvElementwiseInnerProductGeneric<Shape,
ElementA,
ElementB,
ElementC,
Operator>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
ArchMmaOperator mma;
mma(D, A, B, C);
}
};
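// Usage sketch (illustrative only; the shape and element types are hypothetical):
//
//   using Mma = cutlass::conv::thread::DepthwiseDirectConvElementwiseInnerProduct<
//       cutlass::gemm::GemmShape<4, 8, 1>, float, float, float>;
//
//   Mma mma;
//   Mma::FragmentA a;     // 4 output positions x 8 channels
//   Mma::FragmentB b;     // 8 filter taps, one per channel
//   Mma::FragmentC c, d;
//   mma(d, a, b, c);      // d[m][n] = a[m][n] * b[n] + c[m][n]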
} // namespace thread
} // namespace conv
} // namespace cutlass
| 9,689 | C | 28.723926 | 100 | 0.590154 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dFpropFilterTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dAnalyticParams<Layout>;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
int offset_k_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
    pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_c_ += Shape::kRow * problem_size_.split_k_slices;
}
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
return TensorCoord(k, filter_t_, filter_r_, filter_s_, filter_c_);
}
  /// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K &&
coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
    if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
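// Traversal note for the analytic filter iterator above: advance() walks the filter
// in (s, r, t) order with s fastest; for a hypothetical 2x2x2 filter the successive
// (t, r, s) positions are (0,0,0) -> (0,0,1) -> (0,1,0) -> (0,1,1) -> (1,0,0) -> ...
// and after (1,1,1) all three wrap to zero while filter_c_ advances by
// Shape::kRow * split_k_slices, moving on to the next block of input channels.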
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,945 | C | 30.283464 | 107 | 0.654751 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropFilterTileAccessIteratorOptimized{
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params : Conv2dFpropFilterIteratorOptimizedParams<Layout> {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dFpropFilterIteratorOptimizedParams<Layout> const &base):
Conv2dFpropFilterIteratorOptimizedParams<Layout>(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dFpropFilterIteratorOptimizedParams<Layout>(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) {
}
};
private:
Conv2dFpropFilterIteratorOptimizedParams<Layout> const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_rs_;
int filter_c_;
int channels_per_group_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorOptimized(
Conv2dFpropFilterIteratorOptimizedParams<Layout> const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_rs_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
Index column = threadblock_offset.column() + thread_coord.strided();
channels_per_group_ = problem_size_.C / problem_size_.groups;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
uint32_t pred = ((column + s * ThreadMap::Delta::kStrided < problem_size_.K) ? 1u : 0);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
predicates_[v_idx] |= (pred << s);
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= channels_per_group_);
}
pointer_ += (
params_.layout({filter_c_, column})
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_rs;
// moves to the next tile
++filter_rs_;
if (filter_rs_ == params_.RS) {
filter_rs_ = 0;
next = params_.inc_next_c;
filter_c_ += params_.filter_c_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= channels_per_group_);
}
pointer_ += next;
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
predicates_[v] = clear ? 0u : predicates_[v];
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
return (predicates_[iteration_vector_] & (1u << iteration_strided_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_k;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) {
if (problem_size.K % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) {
if (problem_size.K % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
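// Predicate packing note for the optimized filter iterator above: bit s of
// predicates_[v] guards the s-th strided access of vector piece v. For a
// hypothetical thread whose K coordinates for s = 0..3 are {56, 72, 88, 104} with
// problem_size.K == 96, only the first three are in range, so the predicate word is
// 0b0111; valid() then tests the bit selected by iteration_strided_, and
// clear_mask(v) zeroes the whole word once filter_c_ for that vector piece falls
// outside the channels of the current group.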
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,387 | C | 31.666667 | 105 | 0.660441 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of scale and bias vectors.
This iterator uses masks to guard out-of-bounds accesses.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorAccessIterator
///
template <typename ThreadblockShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorAccessIterator for fprop pitch-linear data.
///
template <typename ThreadblockShape_, typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::PitchLinear> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
using Params = PredicatedScaleBiasVectorAccessIteratorParams;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
int problem_size_trs;
int problem_size_c;
int filter_trs_;
TensorCoord thread_offset_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
problem_size_trs(problem_size.R * problem_size.S),
problem_size_c(problem_size.C),
filter_trs_(0) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv3dProblemSize const &problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
problem_size_trs(problem_size.T * problem_size.R * problem_size.S),
problem_size_c(problem_size.C),
filter_trs_(0) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
  /// Construct a PredicatedScaleBiasVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv3dProblemSize const &problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ =
thread_offset_ +
TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
(thread_offset_.contiguous() * sizeof_bits<Element>::value / 8));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
return *this;
}
  /// Advances to the next filter position, moving to the next channel tile once
  /// all filter positions have been visited.
  CUTLASS_HOST_DEVICE
  void advance() {
    // moves to the next filter position
++filter_trs_;
if (filter_trs_ == problem_size_trs) {
filter_trs_ = 0;
add_tile_offset(TensorCoord(1, 0));
}
}
/// Increment and return an instance to self.
CUTLASS_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
uint32_t enabled = 0;
#if defined(_MSC_VER) || (__CUDACC_VER_MAJOR__ < 11)
enabled = threadIdx.x < kThreads * 2;
#else
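    // The inline PTX below computes the same predicate as the branch above:
    // it reads %tid.x and sets 'enabled' to 1 when threadIdx.x < kThreads * 2,
    // i.e. when this thread participates in either the scale or the bias load.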
asm volatile(
"{\n"
" .reg .u32 tid_reg;\n"
" .reg .pred p;\n"
" mov.u32 tid_reg, %%tid.x;\n"
" setp.lt.u32 p, tid_reg, %1;\n"
" selp.u32 %0, 1, 0, p;\n"
"}\n" : "+r"(enabled) :"n"(kThreads * 2));
#endif
return ((thread_offset_.contiguous() < problem_size_c) && enabled);
}
};
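// Minimal usage sketch (illustrative only, not part of the original header).
// 'ThreadblockShape', 'params', 'problem_size', 'scale_ptr', 'bias_ptr', the
// half_t element type and 'iterations' are assumed to be supplied by the
// surrounding kernel. Threads [0, kThreads) stream the scale vector, threads
// [kThreads, 2*kThreads) stream the bias vector, and advance() wraps to the
// next channel tile after the last filter position.
//
//   using Iterator = PredicatedScaleBiasVectorAccessIterator<
//       ThreadblockShape, cutlass::half_t, cutlass::layout::PitchLinear>;
//
//   Iterator iter(params, problem_size, scale_ptr, bias_ptr, threadIdx.x);
//
//   for (int k = 0; k < iterations; ++k) {
//     if (iter.valid()) {
//       typename Iterator::AccessType data = *iter.get();  // 128-bit vector load
//     }
//     iter.advance();  // next filter position / channel tile
//   }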
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename ThreadblockShape_,
typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::RowMajor> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator<
layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
using Params = PredicatedScaleBiasVectorAccessIteratorParams;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Extent of tensor
Conv2dProblemSize const &problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params, problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Extent of tensor
Conv3dProblemSize const &problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params, problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
  /// Construct a PredicatedScaleBiasVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Conv2dProblemSize const &problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Conv3dProblemSize const &problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
  /// Advances the underlying iterator to the next filter position.
CUTLASS_HOST_DEVICE
void advance() {
iterator_.advance();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
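// Note: the row-major specialization above is a thin adapter. It re-expresses
// every (row, column) coordinate as a pitch-linear coordinate with
// contiguous = column and strided = row, then forwards all operations to the
// underlying pitch-linear iterator unchanged.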
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 16,915 | C | 34.915074 | 100 | 0.640142 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/depthwise_direct_conv_params.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Shape_,
typename OutputTileShape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> >
class DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using OutputTileShape = OutputTileShape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1");
static_assert(OutputTileShape::kN == 1, "Require OutputTileShape::kN == 1");
static_assert(OutputTileShape::kC == Shape::kColumn, "Require OutputTile shape == channels per threadblock");
//
// Parameters structure
//
using Params = Depthwise2dFpropDirectConvParams<Layout>;
private:
Conv2dProblemSize const &problem_size_;
Params const ¶ms_;
char const *pointer_;
// Base channels for current threadblock
int base_c_;
// Base activation index for current threadblock
int offset_intial_npq_;
// Base activation coord for current threadblock
TensorCoord activatioin_base_;
  // Initial thread position
int offset_initial_hwc_;
  // Index of the current load instruction issued by this thread.
  int iterator_load_;
  // Current thread loading position.
  int iterator_hwc_;
// Number of loads for activations tensor X.
const int number_of_loads_;
public:
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset =
MatrixCoord()
)
: params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
offset_intial_npq_(threadblock_offset.row()),
offset_initial_hwc_(thread_idx),
iterator_load_(0),
number_of_loads_(params.activation_load_count) {
base_c_ = threadblock_offset.column();
set_activation_coord(offset_intial_npq_);
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
void set_activation_coord(int offset_npq) {
int offset_inital_n, offset_inital_p, offset_inital_q;
int residual;
params_.pq_divmod(offset_inital_n, residual, offset_npq);
params_.q_divmod(offset_inital_p, offset_inital_q, residual);
int base_n = offset_inital_n;
int base_h =
offset_inital_p * OutputTileShape::kH * problem_size_.stride_h - problem_size_.pad_h;
int base_w =
offset_inital_q * OutputTileShape::kW * problem_size_.stride_w - problem_size_.pad_w;
activatioin_base_ = TensorCoord(base_n, base_h, base_w, base_c_);
}
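  // Worked example (illustrative, with assumed sizes): for an output tile of
  // OutputTileShape::kH x OutputTileShape::kW = 8 x 8 with stride_h = stride_w = 1
  // and pad_h = pad_w = 1, the threadblock producing output tile (n, p, q)
  // starts reading activations at base_h = p * 8 - 1 and base_w = q * 8 - 1,
  // i.e. one row and one column above-left of the output tile it produces.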
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(
problem_size,
layout,
{Shape::kRow, Shape::kColumn},
{OutputTileShape::kN, OutputTileShape::kH, OutputTileShape::kW, OutputTileShape::kC},
sizeof_bits<Element>::value,
ThreadMap::kThreads,
ThreadMap::Detail::ShapeVec::kContiguous,
ThreadMap::kElementsPerAccess);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iterator_hwc_ = offset_initial_hwc_ + index * ThreadMap::kThreads;
iterator_load_ = index;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
    // Advance to the next output tile (strided by split_k_slices)
offset_intial_npq_ += problem_size_.split_k_slices;
set_activation_coord(offset_intial_npq_);
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
    int c = iterator_hwc_ % ThreadMap::Detail::ShapeVec::kContiguous;
    int next = iterator_hwc_ / ThreadMap::Detail::ShapeVec::kContiguous;
    int h, w;
    params_.activation_tile_w_divmod(h, w, next);
c = c * AccessType::kElements;
return activatioin_base_ + TensorCoord(0, h, w, c);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N && coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr =
reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized &operator++() {
++iterator_load_;
iterator_hwc_ += ThreadMap::kThreads;
if (iterator_load_ < number_of_loads_) {
return *this;
}
iterator_load_ = 0;
iterator_hwc_ = offset_initial_hwc_;
return *this;
}
/// Determines the activation size loaded by iterator
CUTLASS_HOST_DEVICE
int get_load_size() {
return params_.activation_size;
}
/// Determines the iterations needed
CUTLASS_HOST_DEVICE
int get_iteration_num() {
return number_of_loads_;
}
/// Determines whether the Depthwise fprop can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,899 | C | 32.904109 | 111 | 0.664411 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_direct_conv_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Extracts the host-params objects into non-template code.
*/
#pragma once
#define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
#include <fstream>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvParams;
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams;
/// Parameters structure used for DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvFilterIteratorParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized
template<>
struct Depthwise2dFpropDirectConvParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
int32_t activation_tile_h;
int32_t activation_tile_w;
int32_t activation_tile_hw;
FastDivmod activation_tile_w_divmod;
int filter[2];
int stride[2];
int dilation[2];
int inc_next[2];
FastDivmod pq_divmod;
FastDivmod q_divmod;
int activation_load_count;
int activation_storage_elements;
int activation_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvParams() { }
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
MatrixCoord threadblock_shape, ///< CTA threadblock Shape
Layout::TensorCoord threadblock_output_shape, ///< Output tile Shape per threadblock
const int element_size_bits, ///< bits of activation element
const int thread_count, ///< threads per threadblock
const int thread_count_contiguous, ///< number of threads for continuous dimension
const int element_per_load) ///< element per each load
: layout(layout) {
filter[0] = problem_size.S;
filter[1] = problem_size.R;
stride[0] = problem_size.stride_w;
stride[1] = problem_size.stride_h;
dilation[0] = problem_size.dilation_w;
dilation[1] = problem_size.dilation_h;
    // Compute the activation tile size per threadblock here because stride and dilation are runtime parameters.
activation_tile_h = (threadblock_output_shape.h() - 1) * problem_size.stride_h +
(problem_size.R - 1) * problem_size.dilation_h + 1;
activation_tile_w = (threadblock_output_shape.w() - 1) * problem_size.stride_w +
(problem_size.S - 1) * problem_size.dilation_w + 1;
activation_tile_hw = activation_tile_h * activation_tile_w;
activation_tile_w_divmod = FastDivmod(activation_tile_w);
    /// The two values below cannot be templatized because stride and dilation are runtime parameters
activation_load_count = (thread_count_contiguous * activation_tile_hw + (thread_count - 1)) / thread_count;
activation_storage_elements = activation_load_count * element_per_load * thread_count;
activation_size = activation_storage_elements * element_size_bits / 8;
// Fastdivmod for output P, Q
int tiles_p =
(problem_size.P + (threadblock_output_shape.h() - 1)) / (threadblock_output_shape.h());
int tiles_q = (problem_size.Q + (threadblock_output_shape.w() - 1)) /
(threadblock_output_shape.w());
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
// next S
inc_next[0] = problem_size.dilation_w;
// next R
inc_next[1] = (activation_tile_w * problem_size.dilation_h - (problem_size.S - 1) * problem_size.dilation_w);
}
};
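// Illustrative example (assumed values, not part of the original header): for a
// threadblock output tile of 8 x 8, a 3 x 3 filter, stride 1 and dilation 1,
//   activation_tile_h = (8 - 1) * 1 + (3 - 1) * 1 + 1 = 10
//   activation_tile_w = (8 - 1) * 1 + (3 - 1) * 1 + 1 = 10
// so each threadblock stages a 10 x 10 activation footprint per channel group,
// and activation_load_count = ceil(thread_count_contiguous * 100 / thread_count)
// loads are issued by every thread.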
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation
template <>
struct Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
FastDivmod pq_divmod;
FastDivmod q_divmod;
int activation_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams() {}
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< Layout object
MatrixCoord threadblock_shape, ///< Threadblock Shape
Layout::TensorCoord threadblock_output_shape, ///< Output tile Shape per threadblock
const int activation_size_ ///< Activation size loaded by iterator
)
: layout(layout),
activation_size(activation_size_) {
// Fastdivmod for output P, Q
int tiles_p =
(problem_size.P + (threadblock_output_shape.h() - 1)) / (threadblock_output_shape.h());
int tiles_q =
(problem_size.Q + (threadblock_output_shape.w() - 1)) / (threadblock_output_shape.w());
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized
template <>
struct Depthwise2dFpropDirectConvFilterIteratorParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
int filter_size;
bool is_convolution;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvFilterIteratorParams() {}
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvFilterIteratorParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< Layout object
MatrixCoord threadblock_shape, ///< Threadblock Shape
const int filter_size_) ///< Filter size loaded by iterator
: layout(layout),
filter_size(filter_size_),
is_convolution(problem_size.mode == Mode::kConvolution){}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,871 | C | 37.406926 | 113 | 0.655845 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradOutputGradientTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params : Conv3dWgradOutputGradientIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() {}
CUTLASS_HOST_DEVICE
Params(Conv3dWgradOutputGradientIteratorOptimizedParams const &base)
: Conv3dWgradOutputGradientIteratorOptimizedParams(base) {}
CUTLASS_HOST_DEVICE
Params(Conv3dProblemSize const &problem_size, Layout const &layout)
: Conv3dWgradOutputGradientIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}) {}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
uint32_t predicates_;
int filter_k_;
int offset_nzpq_;
public:
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorOptimized(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_(0),
filter_k_(0),
offset_nzpq_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.contiguous();
offset_nzpq_ = threadblock_offset.column() + thread_coord.strided();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + c * ThreadMap::Delta::kContiguous;
int offset_nzpq = offset_nzpq_ + s * ThreadMap::Delta::kStrided;
bool predicate = valid_(at_(offset_nzpq, filter_k));
uint32_t pred = (predicate ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_ |= (pred << pred_idx);
}
}
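    // Each access (c, s) owns bit (c + s * Iterations::kContiguous) of
    // predicates_: valid() tests that bit for the current iteration indices,
    // and advance() clears the whole per-s group of bits once offset_nzpq_
    // steps past NZPQ.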
// Offset pointer to (iteration_strided_, iteration_contiguous_) = (0, 0)
pointer_ += (
offset_nzpq_ * params.layout.stride()[0] + filter_k_
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
    // moves to the next GEMM-K offset (offset_nzpq_) in GEMM-A by a CTA-K tile
offset_nzpq_ += Shape::kColumn * problem_size_.split_k_slices;
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (offset_nzpq_ + s * ThreadMap::Delta::kStrided >= params_.NZPQ) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
predicates_ = (predicates_ & (~kClearMask));
}
}
pointer_ += params_.inc_next_nzpq;
}
private:
  /// Returns the coordinate in the output gradient tensor Dy pointed to by the
  /// iterator at logical position (offset_nzpq, k).
CUTLASS_HOST_DEVICE
TensorCoord at_(int offset_nzpq, int k) const {
    // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// int nzpq = offset_nzpq_;
// int n = nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
// int residual = nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
//
// int z = residual / (problem_size_.P * problem_size_.Q);
// residual = residual % (problem_size_.P * problem_size_.Q);
//
// int p = residual / problem_size_.Q;
// int q = residual % problem_size_.Q;
int residual, n, z, p, q;
fast_divmod(n, residual, offset_nzpq, params_.ZPQ, params_.zpq_mul, params_.zpq_shr);
fast_divmod(z, residual, residual, params_.PQ, params_.pq_mul, params_.pq_shr);
fast_divmod(p, q, residual, problem_size_.Q, params_.q_mul, params_.q_shr);
return TensorCoord(n, z, p, q, k);
}
/// Returns true if the coord is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid_(TensorCoord coord) const {
return coord.n() < problem_size_.N &&
coord.c() < problem_size_.K;
}
public:
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_ & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(
pointer_ +
iteration_strided_ * params_.offset_next_strided +
iteration_contiguous_ * params_.offset_next_contiguous
);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
    if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,744 | C | 33.549839 | 124 | 0.655622 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Template that wraps the tile access iterator concept to load whole tiles from tensors in
memory used for implicit GEMM convolution.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename TileAccessIterator_>
class TileIterator {
public:
using TileAccessIterator = TileAccessIterator_;
using Shape = typename TileAccessIterator::Shape;
using Element = typename TileAccessIterator::Element;
using Layout = typename TileAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = typename TileAccessIterator::ThreadMap;
using AccessType = typename TileAccessIterator::AccessType;
using TensorRef = typename TileAccessIterator::TensorRef;
using Index = typename TileAccessIterator::Index;
using LongIndex = typename TileAccessIterator::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport;
using Params = typename TileAccessIterator::Params;
static int const kConvDim = TileAccessIterator::kConvDim;
using ConvProblemSize = typename TileAccessIterator::ConvProblemSize;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element,
ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
/// Internal state
TileAccessIterator tile_access_iterator_;
public:
/// Constructor
CUTLASS_HOST_DEVICE
TileIterator(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(params, problem_size, ptr, thread_idx, threadblock_offset) { }
CUTLASS_HOST_DEVICE
static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) {
return TileAccessIterator::getParams(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
tile_access_iterator_.set_iteration_index(index);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
tile_access_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIterator &operator++() {
tile_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIterator operator++(int) {
TileIterator self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[idx],
tile_access_iterator_.get() + pointer_offset,
tile_access_iterator_.valid()
);
++tile_access_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
tile_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
tile_access_iterator_.advance();
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// dispatch to iterator implementation
return TileAccessIterator::can_implement(problem_size);
}
};
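// Minimal usage sketch (illustrative only): 'AccessIt', 'params', 'problem_size',
// 'ptr' and 'tb_offset' are assumed to be provided by the surrounding implicit
// GEMM mainloop. The wrapper turns per-access iteration into whole-tile loads.
//
//   using IteratorA = cutlass::conv::threadblock::TileIterator<AccessIt>;
//
//   IteratorA iterator_A(params, problem_size, ptr, threadIdx.x, tb_offset);
//   typename IteratorA::Fragment frag_A;
//
//   iterator_A.load(frag_A);   // gather one threadblock tile into registers
//   ++iterator_A;              // advance the wrapped access iterator to the next tile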
/////////////////////////////////////////////////////////////////////////////////////////////////
// Strided Dgrad Tile Iterator
template <typename TileAccessIterator_>
class TileIteratorStridedDgrad {
public:
using TileAccessIterator = TileAccessIterator_;
using Shape = typename TileAccessIterator::Shape;
using Element = typename TileAccessIterator::Element;
using Layout = typename TileAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = typename TileAccessIterator::ThreadMap;
using AccessType = typename TileAccessIterator::AccessType;
using TensorRef = typename TileAccessIterator::TensorRef;
using Index = typename TileAccessIterator::Index;
using LongIndex = typename TileAccessIterator::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport;
using Params = typename TileAccessIterator::Params;
static int const kConvDim = TileAccessIterator::kConvDim;
using ConvProblemSize = typename TileAccessIterator::ConvProblemSize;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element,
ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
/// Internal state
TileAccessIterator tile_access_iterator_;
public:
/// Constructor (output gradient (Dy) OperandA ctor)
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(
params,
problem_size,
ptr,
thread_idx,
stride_h_divmod, stride_w_divmod,
start_r, start_s,
threadblock_offset) { }
/// Constructor (filter (w) OperandB ctor)
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(params,
problem_size,
ptr,
thread_idx,
start_r, start_s,
threadblock_offset) { }
CUTLASS_HOST_DEVICE
static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) {
return TileAccessIterator::getParams(problem_size, layout);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
tile_access_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad &operator++() {
tile_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad operator++(int) {
TileIteratorStridedDgrad self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[c + s * ThreadMap::Iterations::kContiguous],
tile_access_iterator_.get() + pointer_offset,
tile_access_iterator_.valid()
);
++tile_access_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
tile_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
tile_access_iterator_.advance();
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// dispatch to iterator implementation
return TileAccessIterator::can_implement(problem_size);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,202 | C | 32.14497 | 100 | 0.672916 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Extracts the host-params objects into non-template code.
*/
#pragma once
#define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
#include "cutlass/conv/conv3d_problem_size.h"
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
#include <fstream>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Params structure used for all Conv3d analytic tile iterators
template< typename Layout_ = layout::TensorNDHWC >
struct Conv3dAnalyticParams {
using Layout = Layout_;
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dAnalyticParams() { }
CUTLASS_HOST_DEVICE
Conv3dAnalyticParams(
Conv3dProblemSize const &, // unused; placeholder to match other Params interfaces.
Layout const &layout
): layout(layout) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv3dFpropActivationTileIteratorOptimized
template< typename Layout_ = layout::TensorNDHWC >
struct Conv3dFpropActivationIteratorOptimizedParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv3dFpropActivationTileIteratorOptimized
template<>
struct Conv3dFpropActivationIteratorOptimizedParams<layout::TensorNDHWC> {
using Layout = layout::TensorNDHWC;
Layout layout;
int64_t inc_next[4]; // {next S, next R, next T, next C}
int filter_c_delta; // number of logical elements to add to filter_c_
int ZPQ; // product of Z*P*Q
int PQ; // product of P*Q
FastDivmod zpq_divmod;
FastDivmod pq_divmod;
FastDivmod q_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dFpropActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dFpropActivationIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
PQ(problem_size.P * problem_size.Q),
ZPQ(problem_size.Z * problem_size.P * problem_size.Q),
zpq_divmod(ZPQ),
pq_divmod(PQ),
q_divmod(problem_size.Q) {
TRACE_CONV_INITIALIZERS("conv3d_fprop", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1);
// next S
inc_next[0] = conv_sign * (
int64_t(layout.stride()[0]) * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[1]) * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next T
inc_next[2] = conv_sign * (
int64_t(layout.stride()[2]) * problem_size.dilation_d
- (problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next C
inc_next[3] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * int64_t(problem_size.T - 1) * layout.stride()[2] * problem_size.dilation_d
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
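// Illustrative note (assuming cross-correlation mode, i.e. conv_sign = +1): the
// inc_next deltas are byte offsets that rewind the previous axis before stepping
// the next one. After S - 1 steps of inc_next[0] (one dilation_w column each),
// inc_next[1] both rewinds those S - 1 columns and advances one dilated row, so
// the pointer lands on filter position (r + 1, s = 0) without recomputing the
// full offset.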
/////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Layout_ = layout::TensorNDHWC >
struct Conv3dFpropFilterIteratorOptimizedParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
template<>
struct Conv3dFpropFilterIteratorOptimizedParams<layout::TensorNDHWC>
{
using Layout = layout::TensorNDHWC;
Layout layout;
int TRS;
int filter_c_delta;
int64_t inc_next_k; // offset in units of bytes to next K position
int64_t inc_next_trs; // offset in units of bytes to next TRS position
int64_t inc_next_c; // offset in units of bytes to next C position
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dFpropFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dFpropFilterIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout) {
TRACE_CONV_INITIALIZERS("conv3d_fprop", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
TRS = problem_size.T * problem_size.R * problem_size.S;
inc_next_k = (int64_t(layout.stride()[3]) * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_trs =
( int64_t(layout.stride()[0])
- int64_t(layout.stride()[3]) * (threadmap_iterations.strided() - 1) * threadmap_delta.strided()
) * element_size_bits / 8;
inc_next_c =
(
threadblock_shape.row() * problem_size.split_k_slices
- int64_t(TRS - 1) * layout.stride()[0]
- int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[3]
) * element_size_bits / 8;
filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
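//
// Worked example (illustrative sketch only): for a hypothetical fp16 KTRSC filter tensor with
// C = 64 and T = R = S = 3, layout.stride()[3] = T * R * S * C = 1728 elements separate adjacent
// K indices, so with an assumed threadmap_delta.strided() of 8 the iterator advances
//
//   inc_next_k = 1728 * 8 * 16 / 8 = 27648 bytes
//
// between the K rows visited by one thread, while inc_next_trs steps one TRS position and
// subtracts the strided K offsets already applied within the current tile.
//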
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv3d DGRAD OutputGradient (dy) iterator
struct Conv3dDgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNDHWC;
Layout layout;
int64_t inc_next[4]; // {next S, next R, next T, next K}
int filter_k_delta; // number of logical elements to add to filter_k_
FastDivmod dhw_divmod;
FastDivmod hw_divmod;
FastDivmod w_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
dhw_divmod(problem_size.D * problem_size.H * problem_size.W),
hw_divmod(problem_size.H * problem_size.W),
w_divmod(problem_size.W) {
TRACE_CONV_INITIALIZERS("conv3d_dgrad", "output_gradient",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? 1 : -1);
// next S
inc_next[0] = conv_sign * (
int64_t(layout.stride()[0]) * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[1]) * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next T
inc_next[2] = conv_sign * (
int64_t(layout.stride()[2]) * problem_size.dilation_d
- (problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next K
inc_next[3] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * int64_t(problem_size.T - 1) * layout.stride()[2] * problem_size.dilation_d
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv2d DGRAD Filter (w) iterator
struct Conv3dDgradFilterIteratorOptimizedParams {
using Layout = layout::TensorNDHWC;
Layout layout;
int TRS;
int filter_k_delta;
int64_t inc_next_strided; // offset in units of bytes to next K coordinate within tile
int64_t inc_next_trs; // offset in units of bytes to next TRS position
int64_t inc_next_k; // offset in units of bytes to next K position in subsequent tile
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dDgradFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dDgradFilterIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout), TRS(problem_size.T * problem_size.R * problem_size.S) {
TRACE_CONV_INITIALIZERS("conv3d_dgrad", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
inc_next_strided = ((int64_t)layout.stride()[3] * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_trs =
( (int64_t)layout.stride()[0]
- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[3]
) * element_size_bits / 8;
inc_next_k =
(
threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[3]
- (problem_size.T * problem_size.R * problem_size.S - 1) * (int64_t)layout.stride()[0]
- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[3]
) * element_size_bits / 8;
filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/// Parameters object for Conv3d WGRAD OutputGradient iterator
struct Conv3dWgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNDHWC;
using LongIndex = typename Layout::LongIndex;
Layout layout;
  int NZPQ;            // precomputed product of N*Z*P*Q for clearing predicates
int ZPQ; // product of Z*P*Q
unsigned zpq_mul; // precomputed quantities for fast computation of div/% by ZPQ
unsigned zpq_shr; // in device code.
int PQ; // product of P*Q
unsigned pq_mul; // precomputed quantities for fast computation of div/% by PQ
unsigned pq_shr; // in device code.
unsigned q_mul; // precomputed quantities for fast computation of div/% by Q
unsigned q_shr; // in device code.
LongIndex offset_next_strided; // offset in units of bytes to next nzpq coordinate within tile
LongIndex offset_next_contiguous; // offset in units of bytes to next k coordinate within tile
LongIndex inc_next_nzpq; // offset in units of bytes to next nzpq position in subsequent tile
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits,
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
): layout(layout) {
TRACE_CONV_INITIALIZERS("conv3d_wgrad", "output_gradient",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
    // Incremental offsets in units of bytes (number of elements) * element_size_bits / 8
offset_next_strided = (threadmap_delta.strided() * (int64_t)layout.stride()[0])
* element_size_bits / 8;
offset_next_contiguous = (threadmap_delta.contiguous())
* element_size_bits / 8;
inc_next_nzpq = (threadblock_shape.column() * problem_size.split_k_slices * (int64_t)layout.stride()[0])
* element_size_bits / 8;
// Precompute several quantities for fast modulo arithmetic.
NZPQ = problem_size.N * problem_size.Z * problem_size.P * problem_size.Q;
ZPQ = problem_size.Z * problem_size.P * problem_size.Q;
find_divisor(zpq_mul, zpq_shr, ZPQ);
PQ = problem_size.P * problem_size.Q;
find_divisor(pq_mul, pq_shr, PQ);
find_divisor(q_mul, q_shr, problem_size.Q);
}
};
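//
// Illustrative sketch (not part of the library): device code consumes the precomputed
// (mul, shr) pairs above through cutlass::fast_divmod() to decompose a linear nzpq offset
// without integer division, conceptually:
//
//   int n, zpq_rem, z, pq_rem, p, q;
//   fast_divmod(n, zpq_rem, offset_nzpq, ZPQ, zpq_mul, zpq_shr);           // n = offset_nzpq / ZPQ
//   fast_divmod(z, pq_rem, zpq_rem, PQ, pq_mul, pq_shr);                   // z = zpq_rem / PQ
//   fast_divmod(p, q, pq_rem, problem_size.Q, q_mul, q_shr);               // p = pq_rem / Q
//
// The argument order shown (quotient, remainder, dividend, divisor, multiplier, shift) is only a
// conceptual rendering; consult cutlass/fast_math.h for the authoritative signature.
//
/////////////////////////////////////////////////////////////////////////////////////////////////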
/// Parameters object for Conv3d WGRAD Activation Tile Access Iterator
struct Conv3dWgradActivationIteratorOptimizedParams {
using Layout = layout::TensorNDHWC;
Layout layout;
int RSC; // product of R*S*C
unsigned rsc_mul; // precomputed quantities for fast computation of div/% by RSC
unsigned rsc_shr; // in device code.
int SC; // product of S*C
unsigned sc_mul; // precomputed quantities for fast computation of div/% by SC
unsigned sc_shr; // in device code.
unsigned c_mul; // precomputed quantities for fast computation of div/% by C
unsigned c_shr; // in device code.
int ZPQ; // product of Z*P*Q
unsigned zpq_mul; // precomputed quantities for fast computation of div/% by ZPQ
unsigned zpq_shr; // in device code.
int PQ; // product of P*Q
unsigned pq_mul; // precomputed quantities for fast computation of div/% by PQ
unsigned pq_shr; // in device code.
unsigned q_mul; // precomputed quantities for fast computation of div/% by Q
unsigned q_shr; // in device code.
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dWgradActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dWgradActivationIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits,
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
): layout(layout) {
TRACE_CONV_INITIALIZERS("conv3d_wgrad", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
// Precompute several quantities for fast modulo arithmetic.
RSC = problem_size.R * problem_size.S * problem_size.C;
find_divisor(rsc_mul, rsc_shr, RSC);
SC = problem_size.S * problem_size.C;
find_divisor(sc_mul, sc_shr, SC);
find_divisor(c_mul, c_shr, problem_size.C);
ZPQ = problem_size.Z * problem_size.P * problem_size.Q;
find_divisor(zpq_mul, zpq_shr, ZPQ);
PQ = problem_size.P * problem_size.Q;
find_divisor(pq_mul, pq_shr, PQ);
find_divisor(q_mul, q_shr, problem_size.Q);
}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 18,249 | C | 34.854617 | 110 | 0.633131 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting depthwise related simt instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/warp/mma_depthwise_simt.h"
#include "cutlass/gemm/threadblock/mma_pipelined.h"
#include "cutlass/gemm/threadblock/mma_singlestage.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "cutlass/conv/threadblock/depthwise_mma_base.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h"
#include "cutlass/arch/cache_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
namespace detail {
//
// Converts WarpShapeM, the total number of elements covered by the whole warp (1D), into the
// 2D tile of elements (kP x kQ) held by each partition within the warp.
// The goal is for each thread's tile of elements to be as square as
// possible for performance (4x4 will be faster than 2x8).
template<int WarpShapeM, // The number of elements (1D) contained in the entire warp
int WarpNumThreadsM> // The number of partitions within the warp
struct SimtWarpShape {
// kP * kQ * WarpNumThreadsM = WarpShapeM
// If needed, enable more specializations.
};
template <>
struct SimtWarpShape<4, 4> {
static constexpr int kP = 1;
static constexpr int kQ = 1;
};
template <>
struct SimtWarpShape<4, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 1;
};
template <>
struct SimtWarpShape<4, 1> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<8, 1> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<8, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<8, 4> {
static constexpr int kP = 1;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<16, 1> {
static constexpr int kP = 4;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<16, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<16, 4> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <int WarpNumThreadsM>
struct SimtWarpShape<25, WarpNumThreadsM> {
  static_assert(WarpNumThreadsM == 1, "WarpShapeM cannot be evenly split across threads");
static constexpr int kP = 5;
static constexpr int kQ = 5;
};
template <>
struct SimtWarpShape<32, 1> {
static constexpr int kP = 4;
static constexpr int kQ = 8;
};
template <>
struct SimtWarpShape<32, 2> {
static constexpr int kP = 4;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<32, 4> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
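// Illustrative compile-time checks (a sketch; not required by the library): every specialization
// above satisfies kP * kQ * WarpNumThreadsM == WarpShapeM while keeping the per-thread patch as
// square as the factorization allows.
static_assert(SimtWarpShape<16, 4>::kP * SimtWarpShape<16, 4>::kQ * 4 == 16,
              "Each of 4 partitions covers a 2x2 patch of a 16-element warp tile");
static_assert(SimtWarpShape<32, 2>::kP * SimtWarpShape<32, 2>::kQ * 2 == 32,
              "Each of 2 partitions covers a 4x4 patch of a 32-element warp tile");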
} // namespace detail
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_ = 0,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeB_ = 0,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DepthwiseMmaCoreWithLaneAccessSize;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of threadblock-scoped output tile
typename ThreadBlockOutputShape,
/// Shape of filter shape per threadblock
typename FilterShape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_ = 0,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeB_ = 0,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock
typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA,
/// per-element transformation for elements of B
ComplexTransform TransformB,
bool IsComplex
>
struct DepthwiseMmaCoreWithLaneAccessSize<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, -1, -1, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> : cutlass::gemm::threadblock::DefaultMmaCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access (a value of -1 indicates the default)
int kLaneAccessSizeA_,
/// Size of a warp-scoped per thread access (a value of -1 indicates the default)
int kLaneAccessSizeB_,
/// Operation performed by GEMM
typename Operator_>
struct DepthwiseMmaCoreWithLaneAccessSize<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
kLaneAccessSizeB_,
2,
Operator_> : public cutlass::gemm::threadblock::DefaultMmaCore<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
2,
Operator_> {
using Base = cutlass::gemm::threadblock::DefaultMmaCore<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
2,
Operator_>;
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const kLaneAccessSizeA = kLaneAccessSizeA_;
static int const kLaneAccessSizeB = kLaneAccessSizeB_;
  // Divisibility requirements
  static_assert( kLaneAccessSizeA > 0 && kLaneAccessSizeB > 0,
      "Size of a warp-scoped per thread access should be larger than zero" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = typename Base::WarpCount;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory are same as base class
//
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = cutlass::gemm::threadblock::detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = kLaneAccessSizeA / sizeof_bits<ElementA>::value;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
static int const kPaddingM = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
  // LaneM and LaneN above are already capped at the per-thread tile dimensions.
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
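  // Worked example (illustrative only, values are assumptions): with ElementA = ElementB = float,
  // a 32x32 warp tile split as 4x8 threads (M x N) gives an 8x4 thread tile. A 32-bit lane access
  // yields numElementsA = numElementsB = 32 / 32 = 1, so LaneM = LaneN = 1 and each lane issues
  // scalar FMAs; a 128-bit lane access would instead give numElements = 4 and a 4x4x1
  // LaneMmaShape, capped by the thread tile computed above.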
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::gemm::threadblock::MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of threadblock-scoped output tile (concept: TensorNHWCShape)
typename ThreadBlockOutputShape_,
/// Shape of filter shape per threadblock
typename FilterShape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_,
/// Number of stages
int Stages_,
/// Operation performed by GEMM
typename Operator_>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_,
ThreadBlockOutputShape_,
FilterShape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
128,
Stages_,
Operator_> {
using Shape = Shape_;
using FilterShape = FilterShape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const kLaneAccessSizeB = 128;
  // Divisibility requirements
  static_assert( kLaneAccessSizeB > 0,
      "Size of a warp-scoped per thread access should be larger than zero" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = cutlass::gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
1
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
// For Gmem load
static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value;
static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
    layout::PitchLinearShape<Shape::kN, 1>,   // Set kStrided = 1 because the activation shape is a runtime value.
kThreads,
kElementsPerAccessA
>;
/// ThreadMap of iterator A
using SmemThreadMapA = IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv<
      MatrixShape<1, Shape::kN>,   // set kRow to 1 because it is a runtime value
ElementA,
SmemLayoutA,
0,
SmemThreadMapA, // was IteratorThreadMapA
true // Dynamic iterations.
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, FilterShape::kCount>,
kThreads,
kElementsPerAccessB
>;
  /// ThreadMap used when writing iterator B's tile to shared memory (no transpose is applied here)
using SmemThreadMapB = IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<FilterShape::kCount, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB, // was IteratorThreadMapB
false // static iterations.
>;
//
// Warp-level matrix multiply operator
//
  // Output-channel groups computed per thread:
  //   multi-byte types (Fp32, Fp16): 2 groups
  //   8-bit types: 4 groups
static const int GroupsPerThread = sizeof(ElementB) > 1 ? 2 : 4;
// Define the warp-level op
static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize);
static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
// Get output P, Q per thread
static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP;
static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ;
static const int LaneLayout = 1;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN);
// Define the output tile computed by each thread
using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>;
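  // Worked example (illustrative only, values are assumptions): for fp16 filters
  // (sizeof(ElementB) == 2) GroupsPerThread is 2, so a 16x64 warp tile gives
  // WarpNumThreadsN = min(64 / 2, 32) = 32 and WarpNumThreadsM = 1; SimtWarpShape<16, 1> then
  // assigns each thread a 4x4 (P x Q) output patch, and the 128-bit lane access yields
  // LaneN = min(128 / 16, 64 / 32) = 2 channels, i.e. ThreadOutputShape = TensorNHWCShape<1, 4, 4, 2>.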
// Fetch the channel with same access size
static const int LaneM = LaneN;
// No paddings
static int const kPaddingM = 0;
static int const kPaddingN = 0;
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
  // LaneM and LaneN above are already capped at the per-thread tile dimensions.
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
FilterShape, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<>
ThreadBlockOutputShape_, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
IteratorThreadMapA,
IteratorThreadMapB,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of threadblock-scoped output tile (concept: TensorNHWCShape)
typename ThreadBlockOutputShape_,
/// Shape of filter shape per threadblock
typename FilterShape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_,
/// Number of stages
int Stages_,
/// Operation performed by GEMM
typename Operator_,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_,
/// Activation Shape loaded by threadblock
typename ActivationShape_>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_,
ThreadBlockOutputShape_,
FilterShape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
128,
Stages_,
Operator_,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape_,
DilationShape_,
ActivationShape_> {
using Shape = Shape_;
using FilterShape = FilterShape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
using StrideShape = StrideShape_;
using DilationShape = DilationShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using ActivationShape = ActivationShape_;
static int const kLaneAccessSizeB = 128;
  // Divisibility requirements
  static_assert( kLaneAccessSizeB > 0,
      "Size of a warp-scoped per thread access should be larger than zero" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = cutlass::gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
1
>;
  // Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
// For Gmem load
static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value;
static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<ActivationShape::kC, ActivationShape::kNHW>,
kThreads,
kElementsPerAccessA
>;
/// ThreadMap of iterator A
using SmemThreadMapA = IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<ActivationShape::kNHW, ActivationShape::kC>,
ElementA,
SmemLayoutA,
0,
SmemThreadMapA, // was IteratorThreadMapA
false // static iterations.
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, FilterShape::kCount>,
kThreads,
kElementsPerAccessB
>;
  /// ThreadMap used when writing iterator B's tile to shared memory (no transpose is applied here)
using SmemThreadMapB = IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<FilterShape::kCount, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB, // was IteratorThreadMapB
false // static iterations.
>;
//
// Warp-level matrix multiply operator
//
  // Output-channel groups computed per thread:
  //   multi-byte types (Fp32, Fp16): 2 groups
  //   8-bit types: 4 groups
static const int GroupsPerThread = sizeof(ElementB) > 1 ? 2 : 4;
// Define the warp-level op
static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize);
static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN;
static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP;
static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = 1;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN);
// Define the output tile computed by each thread
using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>;
// Fetch the channel with same access size
static const int LaneM = LaneN;
// No paddings
static int const kPaddingM = 0;
static int const kPaddingN = 0;
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
  // LaneM and LaneN above are already capped at the per-thread tile dimensions.
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
FilterShape, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<>
ThreadBlockOutputShape, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
IteratorAlgorithm::kFixedStrideDilation, /// Iterator algo type
StrideShape, /// Stride ( MatrixShape<Height, Width> )
DilationShape, /// Dilation ( MatrixShape<Height, Width> )
ActivationShape /// Activation Shape loaded by threadblock
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
IteratorThreadMapA,
IteratorThreadMapB,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
| 36,697 | C | 37.50787 | 142 | 0.619696 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/threadblock_swizzle.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements several possible threadblock-swizzling functions mapping blockIdx to
Convolution problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
static int get_strided_dgrad_tile_m(
cutlass::conv::Conv2dProblemSize const &problem_size,
int tile_size_m) {
// CTAs in M dimension per starting filter position
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, tile_size_m);
  // Inflate number of CTAs in M dimension to cover every starting filter position, even those that
// may fall out of valid MMA (Dy * w) but are needed to apply epilogue (beta * Dx_source)
// and point-wise fusion
int tile_m = tile_m_per_filter * int(problem_size.stride().product());
  // There is a possible performance optimization here that yields up to a 2x speedup over the
  // current CUTLASS strided dgrad performance for stride > filter (e.g., stride={2x2} and filter={1x1})
//
// * Optimization *
  //       Only launch CTAs in the M dimension which contribute to a row in the Dx output
//
//
// * Constraints *
// (A) stride <= filter, for example, stride={2x2} and filter={3x3}:
  //      - (A.1): There are no constraints for this case and the optimization does
  //               not affect this case's functionality or performance.
// (B) stride > filter, for example, stride={2x2} and filter={1x1}:
// - (B.1): Dx output tensor should be zero initialized
// - (B.2): The kernel epilogue cannot apply beta. Thus, beta should be zero
return tile_m;
}
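//
// Worked example (illustrative only, values are assumptions): for stride = {2, 2},
// strided_dgrad_tile_m_per_filter() first yields the CTA count needed to cover one starting
// filter position, say 3; multiplying by stride().product() = 4 launches those CTAs for each of
// the 4 starting positions, so get_strided_dgrad_tile_m() returns tile_m = 3 * 4 = 12.
//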
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for strided dgrad convolution
struct StridedDgradHorizontalThreadblockSwizzle :
public gemm::threadblock::GemmHorizontalThreadblockSwizzle {
using Base = gemm::threadblock::GemmHorizontalThreadblockSwizzle;
CUTLASS_HOST_DEVICE
StridedDgradHorizontalThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
gemm::GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) const {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
// compute number of tiles in m dimension
int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m());
    // compute number of tiles in n dimension
int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n();
return gemm::GemmCoord(
tile_m,
tile_n,
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape())
private:
using Base::get_tiled_shape;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for strided dgrad convolution
template <int N = 1>
struct StridedDgradIdentityThreadblockSwizzle :
public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> {
using Base = gemm::threadblock::GemmIdentityThreadblockSwizzle<N>;
CUTLASS_HOST_DEVICE
StridedDgradIdentityThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
gemm::GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) const {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
// compute number of tiles in m dimension
int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m());
    // compute number of tiles in n dimension
int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n();
return gemm::GemmCoord(
tile_m,
tile_n,
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape())
private:
using Base::get_tiled_shape;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
template <int N = 1, int Output_N = 1, int Output_P = 1, int Output_Q = 1>
struct DepthwiseDirect2dConvIdentityThreadblockSwizzle
: public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> {
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvIdentityThreadblockSwizzle() {}
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
gemm::GemmCoord get_tiled_shape(cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) const {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return gemm::GemmCoord(1,
(implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
| 8,050 | C | 40.5 | 100 | 0.643975 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity
>
class Conv3dDgradOutputGradientTileAccessIteratorOptimized {
public:
static_assert(StrideSupport_ == conv::StrideSupport::kUnity,
"Only unit-stride dgrad is supported at this time.");
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
using Coord3D = Coord<3>;
static int const kAccessesPerVector = 1;
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dDgradOutputGradientIteratorOptimizedParams;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (t, r, s)
int filter_t_;
int filter_r_;
int filter_s_;
int filter_k_;
Index masks_[ThreadMap::Iterations::kStrided][3];
public:
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorOptimized(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_t_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_d[ThreadMap::Iterations::kStrided];
int offset_h[ThreadMap::Iterations::kStrided];
int offset_w[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_ndhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_ndhw / (problem_size_.D * problem_size_.H * problem_size_.W);
// int residual = offset_ndhw % (problem_size_.D * problem_size_.H * problem_size_.W);
//
//
// offset_d[s] = residual / (problem_size_.H * problem_size_.W);
// residual = residual % (problem_size_.H * problem_size_.W);
//
// offset_h[s] = residual / problem_size_.W;
// offset_w[s] = residual % problem_size_.W;
//
int residual;
      // input: (ndhw offset) output: (n offset and residual (dhw offset))
params_.dhw_divmod(offset_n[s], residual, offset_ndhw);
      // input: (dhw offset) output: (d offset and residual (hw offset))
params_.hw_divmod(offset_d[s], residual, residual);
      // input: (hw offset) output: (h offset and residual (w offset))
params_.w_divmod(offset_h[s], offset_w[s], residual);
TensorCoord coord = at_(offset_n[s], offset_d[s], offset_h[s], offset_w[s], 0, 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
CUTLASS_PRAGMA_NO_UNROLL
for (int t = 0; t < problem_size_.T; ++t) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int t_ = t;
if (problem_size_.mode == Mode::kConvolution) {
t_ = problem_size_.T - 1 - t;
}
int z = offset_d[s_idx] + problem_size_.pad_d - t_ * problem_size_.dilation_d;
bool pred = (offset_n[s_idx] < problem_size_.N && z >= 0 && z < problem_size_.Z);
masks_[s_idx][0] |= (pred << t);
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int p = offset_h[s_idx] + problem_size_.pad_h - r_ * problem_size_.dilation_h;
bool pred = (p >= 0 && p < problem_size_.P);
masks_[s_idx][1] |= (pred << r);
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int q = offset_w[s_idx] + problem_size_.pad_w - s_ * problem_size_.dilation_w;
bool pred = (q >= 0 && q < problem_size_.Q);
masks_[s_idx][2] |= (pred << s);
}
}
if (filter_k_ >= problem_size.K) {
clear_mask();
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
  /// Returns the coordinate in the output gradient tensor Dy corresponding to
  /// activation position (n, d, h, w) and filter position (k, t, r, s)
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int d, int h, int w, int t, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
t = problem_size_.T - 1 - t;
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int z = d + problem_size_.pad_d - t * problem_size_.dilation_d;
int p = h + problem_size_.pad_h - r * problem_size_.dilation_h;
int q = w + problem_size_.pad_w - s * problem_size_.dilation_w;
return TensorCoord(n, z, p, q, filter_k_);
}
  /// Adds a pointer offset in units of bytes
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask_(bool clear) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
      // We are using inline PTX assembly here to avoid a CUDA C++ compilation
// artifact in which control flow instructions are generated. Instead, our
// intent is to predicate the mov instructions.
#if defined(__CUDA_ARCH__)
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][0])
:
"r"((int)clear),
"r"(masks_[s][0])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][1])
:
"r"((int)clear),
"r"(masks_[s][1])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][2])
:
"r"((int)clear),
"r"(masks_[s][2])
);
#else
if (clear) {
masks_[s][0] = 0;
masks_[s][1] = 0;
masks_[s][2] = 0;
}
#endif
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
next_idx = 1;
if (filter_r_ == problem_size_.R) {
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
next_idx = 2;
}
else {
filter_t_ = 0;
next_idx = 3;
}
}
}
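    // next_idx selects one of the precomputed byte increments:
    //   0 -> next S position, 1 -> next R position, 2 -> next T position, 3 -> next filter-K tile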
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 3) {
filter_k_ += params_.filter_k_delta;
}
clear_mask_(filter_k_ >= problem_size_.K);
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask() {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][0] = Mask(0);
masks_[s][1] = Mask(0);
masks_[s][2] = Mask(0);
}
}
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][0] & (Index(1) << filter_t_)) &&
(masks_[iteration_strided_][1] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][2] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// This is specialized for unit stride
if (problem_size.stride() != Coord3D({1, 1, 1})) {
return Status::kErrorNotSupported;
}
// check alignment constraint on iterator's contiguous dimension
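    // e.g., for 16-bit elements a 128-bit access covers 128 / 16 = 8 elements, so K must be a
    // multiple of 8 (illustrative only; the divisor follows from sizeof_bits<Element>::value)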
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorNotSupported;
}
// Limit on filter size
if (problem_size.T > 32 || problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,014 | C | 29.642857 | 114 | 0.58905 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates computing the address and predicates for loading scale and bias vectors.
This iterator uses masks to guard out-of-bounds accesses.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorIterator
///
template <typename WarpShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorIterator for wgrad pitch-linear data.
///
template <typename WarpShape_, typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
Element_,
layout::PitchLinear> {
public:
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 1;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
static int const kIterations = WarpShape::kContiguous / 8;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<__half2, 2 * kIterations * kElementsPerAccess>;
/// Parameters object is precomputed state and is host-constructible
using Params = Conv2dWgradActivationIteratorOptimizedParams;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
ConstPointer scale_pointer_;
ConstPointer bias_pointer_;
/// Size of tensor
Conv2dProblemSize problem_size_;
int32_t thread_offset_;
  // The channel coordinate in the contiguous dimension stays constant within each gemm-k iteration
int32_t filter_c_[kIterations];
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
problem_size_(problem_size),
scale_pointer_(scale_pointer),
bias_pointer_(bias_pointer) {
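    // Groups of four consecutive lanes share one offset: (thread_id % 32) / 4 yields a per-quad
    // index in [0, 8) that is added to the threadblock's contiguous offset.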
thread_offset_ = threadblock_offset.contiguous() + (thread_id % 32) / 4;
}
  /// Construct a PredicatedScaleBiasVectorIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Advances an iterator along logical dimensions of matrix in units of whole warp tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ += (WarpShape::kContiguous * tile_offset.contiguous());
CUTLASS_PRAGMA_UNROLL
for(int c = 0; c < kIterations; ++c) {
int rsc_offset = thread_offset_ + c * 8;
int residual, tmp;
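      // rsc_offset linearizes (r, s, c) as r * (S * C) + s * C + c; sc_divmod strips the r
      // component, and c_divmod then extracts the channel, which is kept in filter_c_[c].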
params_.sc_divmod(tmp, residual, rsc_offset);
params_.c_divmod(tmp, filter_c_[c], residual);
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.fill(__float2half2_rn(0.0f));
__half2 *frag_ptr = reinterpret_cast<__half2 *>(&frag);
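    // Fragment layout produced below: for each iteration i, frag_ptr[2 * i] holds the scale value
    // duplicated into both halves of a __half2, and frag_ptr[2 * i + 1] holds the bias, likewise
    // duplicated.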
// load scale
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
__half,
sizeof(AccessType)
>(
frag_ptr[c * 2].x,
scale_pointer_ + filter_c_[c],
true
);
}
// load bias
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
__half,
sizeof(AccessType)
>(
frag_ptr[c * 2 + 1].x,
bias_pointer_ + filter_c_[c],
true
);
}
// duplicate scale
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
frag_ptr[c * 2].y = frag_ptr[c * 2].x;
}
// duplicate bias
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
frag_ptr[c * 2 + 1].y = frag_ptr[c * 2 + 1].x;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename WarpShape_,
typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
Element_,
layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorIterator<
layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
using Fragment = typename UnderlyingIterator::Fragment;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedScaleBiasVectorIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Conv2dProblemSize const &problem_size, Layout const &layout)
: params_(problem_size, layout::TensorNHWC(0, 0, 0)){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Extent of tensor
Conv2dProblemSize const &problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
  /// Construct a PredicatedScaleBiasVectorIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
Params const ¶ms, ///< Precomputed parameters object
Conv2dProblemSize const &problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
iterator_.load(frag);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 12,476 | C | 32.540322 | 100 | 0.639468 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Extracts the host-params objects into non-template code.
*/
#pragma once
#define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
#include <fstream>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Params structure used for all Conv2d analytic tile iterators
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dAnalyticParams {
using Layout = Layout_;
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dAnalyticParams() { }
CUTLASS_HOST_DEVICE
Conv2dAnalyticParams(
Conv2dProblemSize const &, // unused; placeholder to match other Params interfaces.
Layout const &layout
): layout(layout) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Params structure used for Conv2d few-channels tile iterators
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFewChannelsParams {
using Layout = Layout_;
int32_t stride_w;
int32_t stride_h;
int32_t stride_n;
FastDivmod divmod_P;
FastDivmod divmod_Q;
FastDivmod divmod_S;
FastDivmod divmod_C;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFewChannelsParams() { }
CUTLASS_HOST_DEVICE
Conv2dFewChannelsParams(
    Conv2dProblemSize const &problem_size,  // used to initialize the FastDivmod helpers below
Layout const &layout
):
stride_w(int32_t(layout.stride()[0])),
stride_h(int32_t(layout.stride()[1])),
stride_n(int32_t(layout.stride()[2])),
divmod_P(problem_size.P),
divmod_Q(problem_size.Q),
divmod_S(problem_size.S),
divmod_C(problem_size.C)
{
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams
struct Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams {
using Layout = layout::TensorNHWC;
Layout layout;
int tiled_rows_per_filter;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams() { }
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape
): layout(layout) {
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_shape.row());
tiled_rows_per_filter = tile_m_per_filter * threadblock_shape.row();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
CUTLASS_HOST_DEVICE
void TraceIteratorParams(
char const *conv_operator,
char const *operand,
int element_size_bits,
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
) {
#if !defined(__CUDA_ARCH__)
char const *fname = "conv_iterator_params.csv";
std::ifstream test(fname);
bool file_exists = test.is_open();
if (file_exists) {
test.close();
}
std::ofstream trace("conv_iterator_params.csv", std::ofstream::app);
if (!file_exists) {
trace
<< "Operator,Operand,ElementSize,CtaRows,CtaColumns,ThreadCount,AccessSize,"
<< "IterationsContiguous,IterationsStrided,DeltaContiguous,DeltaStrided\n";
}
trace << conv_operator << "," << operand << "," << element_size_bits << ","
<< threadblock_shape.row() << "," << threadblock_shape.column()
<< "," << thread_count << "," << access_size
<< "," << threadmap_iterations.contiguous() << "," << threadmap_iterations.strided()
<< "," << threadmap_delta.contiguous() << "," << threadmap_delta.strided() << "\n";
#endif
}
#define TRACE_CONV_INITIALIZERS(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta) \
TraceIteratorParams(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta);
#else
#define TRACE_CONV_INITIALIZERS(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta) {}
#endif
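// A row appended by this trace might look like the following (hypothetical values, shown only to
// illustrate the column order defined above):
//   conv2d_fprop,activation,16,128,64,256,8,4,2,64,8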
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFpropActivationIteratorOptimizedParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
template<>
struct Conv2dFpropActivationIteratorOptimizedParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
int64_t inc_next[3]; // {next S, next R, next C}
int filter_c_delta; // number of logical elements to add to filter_c_
int PQ; // product of P*Q
FastDivmod pq_divmod;
FastDivmod q_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFpropActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dFpropActivationIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
PQ(problem_size.P * problem_size.Q),
pq_divmod(PQ),
q_divmod(problem_size.Q) {
TRACE_CONV_INITIALIZERS("conv2d_fprop", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1);
// next S
inc_next[0] = conv_sign * (
int64_t(layout.stride()[0]) * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[1]) * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next C
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices;
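    // Illustrative byte increments, assuming hypothetical values: packed NHWC with C = 64, W = 56,
    // H = 56 (stride = {64, 3584, 200704}), 16-bit elements, unit dilation, cross-correlation
    // (conv_sign = +1), R = S = 3, threadblock column = 64, split_k_slices = 1:
    //   inc_next[0] = 64 * 2                       =    128 bytes (step s)
    //   inc_next[1] = (3584 - 2 * 64) * 2          =   6912 bytes (step r, rewind s)
    //   inc_next[2] = (64 - 2 * 3584 - 2 * 64) * 2 = -14464 bytes (next C tile, rewind r and s)
    //   filter_c_delta = 64 elements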
}
#if ENABLE_CONV2D_PARAMS_PRINT
/// Prints internal state.
CUTLASS_HOST_DEVICE
void print() {
auto stride = layout.stride();
printf(
"Conv2dFpropActivationIteratorOptimizedParams:\n"
" layout(w: %d, h: %d, n: %d)\n"
" inc_next[%ld, %ld, %ld]\n"
" filter_c_delta(%d) - PQ(%d)\n"
" pq_divmod(divisor: %d, multiplier: %u, shift_right: %u)\n"
" q_divmod(divisor: %d, multiplier: %u, shift_right: %u)\n",
stride[0], stride[1], stride[2],
inc_next[0], inc_next[1], inc_next[2],
filter_c_delta,
PQ,
pq_divmod.divisor,
pq_divmod.multiplier,
pq_divmod.shift_right,
q_divmod.divisor,
q_divmod.multiplier,
q_divmod.shift_right
);
}
#endif
};
/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
template <int Interleaved_>
struct Conv2dFpropActivationIteratorOptimizedParams<layout::TensorNCxHWx<Interleaved_>> {
static int const kInterleaved = Interleaved_;
using Layout = layout::TensorNCxHWx<kInterleaved>;
Layout layout;
int64_t inc_next[3]; // {next S, next R, next C}
int filter_c_delta; // number of logical elements to add to filter_c_
int PQ; // product of P*Q
FastDivmod pq_divmod;
FastDivmod q_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFpropActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dFpropActivationIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout), PQ(problem_size.P * problem_size.Q), pq_divmod(PQ), q_divmod(problem_size.Q) {
TRACE_CONV_INITIALIZERS("conv2d_fprop", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1);
// next S
inc_next[0] = conv_sign * (kInterleaved * problem_size.dilation_w) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[0]) * problem_size.dilation_h
- (problem_size.S - 1) * kInterleaved * problem_size.dilation_w
) * element_size_bits / 8;
// next C
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices / kInterleaved * int64_t(layout.stride()[1])
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[0] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * kInterleaved * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFpropFilterIteratorOptimizedParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
template<>
struct Conv2dFpropFilterIteratorOptimizedParams<layout::TensorNHWC>
{
using Layout = layout::TensorNHWC;
Layout layout;
int RS;
int filter_c_delta;
int64_t inc_next_k; // offset in units of bytes to next K position
int64_t inc_next_rs; // offset in units of bytes to next RS position
int64_t inc_next_c; // offset in units of bytes to next C position
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFpropFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dFpropFilterIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout) {
TRACE_CONV_INITIALIZERS("conv2d_fprop", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
RS = problem_size.R * problem_size.S;
inc_next_k = (int64_t(layout.stride()[2]) * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_rs =
( int64_t(layout.stride()[0])
- int64_t(layout.stride()[2]) * (threadmap_iterations.strided() - 1) * threadmap_delta.strided()
) * element_size_bits / 8;
inc_next_c =
(
threadblock_shape.row() * problem_size.split_k_slices
- int64_t(RS - 1) * layout.stride()[0]
- int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
) * element_size_bits / 8;
filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
#if ENABLE_CONV2D_PARAMS_PRINT
/// Prints internal state.
CUTLASS_HOST_DEVICE
void print() {
auto stride = layout.stride();
printf(
"Conv2dFpropFilterIteratorOptimizedParams:\n"
" layout[%d, %d, %d]\n"
" RS(%d), filter_c_delta(%d), inc_next(k: %ld, rs: %ld, c: %ld)\n",
stride[0], stride[1], stride[2],
RS,
filter_c_delta,
inc_next_k, inc_next_rs, inc_next_c
);
}
#endif
};
template<int Interleaved_>
struct Conv2dFpropFilterIteratorOptimizedParams<layout::TensorCxRSKx<Interleaved_>>
{
static int const kInterleaved = Interleaved_;
using Layout = layout::TensorCxRSKx<kInterleaved>;
Layout layout;
int RS;
int filter_c_delta;
int64_t inc_next_k; // offset in units of bytes to next K position
int64_t inc_next_rs; // offset in units of bytes to next RS position
int64_t inc_next_c; // offset in units of bytes to next C position
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFpropFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dFpropFilterIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout) {
TRACE_CONV_INITIALIZERS("conv2d_fprop", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
RS = problem_size.R * problem_size.S;
inc_next_k = (kInterleaved * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_rs =
( int64_t(layout.stride()[0])
- kInterleaved * (threadmap_iterations.strided() - 1) * threadmap_delta.strided()
) * element_size_bits / 8;
inc_next_c =
(
threadblock_shape.row() * problem_size.split_k_slices / kInterleaved * int64_t(layout.stride()[2])
- int64_t(RS - 1) * layout.stride()[0]
- int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * kInterleaved
) * element_size_bits / 8;
filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Dgrad Optimized Dy params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv2d DGRAD OutputGradient (dy) iterator
struct Conv2dDgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
int64_t inc_next[3]; // {next S, next R, next K}
int filter_k_delta; // number of logical elements to add to filter_k_
int HW; // product of H*W
FastDivmod hw_divmod;
FastDivmod w_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
    HW(problem_size.H * problem_size.W),
hw_divmod(HW),
w_divmod(problem_size.W) {
TRACE_CONV_INITIALIZERS("conv2d_dgrad", "output_gradient",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? 1 : -1);
// next S
inc_next[0] = conv_sign * (
(int64_t)layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
(int64_t)layout.stride()[1] * problem_size.dilation_h
- (problem_size.S - 1) * (int64_t)layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next K
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * (problem_size.R - 1) * (int64_t)layout.stride()[1] * problem_size.dilation_h
- conv_sign * (problem_size.S - 1) * (int64_t)layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Strided Dgrad Optimized Dy params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Conv2dStridedDgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
int64_t inc_next[3]; // {next S, next R, next K}
int filter_k_delta; // number of logical elements to add to filter_k_
int tiled_rows_per_filter;
int conv_sign;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dStridedDgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dStridedDgradOutputGradientIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape
): layout(layout) {
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_shape.row());
tiled_rows_per_filter = tile_m_per_filter * threadblock_shape.row();
conv_sign = (problem_size.mode == Mode::kConvolution ? 1 : -1);
// next S
inc_next[0] = conv_sign * (
(int64_t)layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
(int64_t)layout.stride()[1] * problem_size.dilation_h
) * element_size_bits / 8;
// next K
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
// Dgrad Optimized w params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Conv2dDgradFilterIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
int RS;
int filter_k_delta;
int64_t inc_next_strided; // offset in units of bytes to next K coordinate within tile
int64_t inc_next_rs; // offset in units of bytes to next RS position
int64_t inc_next_k; // offset in units of bytes to next K position in subsequent tile
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dDgradFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dDgradFilterIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout), RS(problem_size.R * problem_size.S) {
TRACE_CONV_INITIALIZERS("conv2d_dgrad", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
inc_next_strided = ((int64_t)layout.stride()[2] * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_rs =
( (int64_t)layout.stride()[0]
- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
) * element_size_bits / 8;
inc_next_k =
(
threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[2]
- (problem_size.R * problem_size.S - 1) * (int64_t)layout.stride()[0]
- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
) * element_size_bits / 8;
filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
// StridedDgrad Optimized w params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Conv2dStridedDgradFilterIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
int RS;
int filter_k_delta;
int64_t inc_next_strided; // offset in units of bytes to next K coordinate within tile
int64_t inc_next[3]; // {next S, next R, next K}
int64_t reset_bytes; // offset in units of bytes to move back the pointer
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dStridedDgradFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dStridedDgradFilterIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout), RS(problem_size.R * problem_size.S) {
TRACE_CONV_INITIALIZERS("conv2d_dgrad", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
inc_next_strided = (layout.stride()[2] * threadmap_delta.strided() * element_size_bits) / 8;
// next S
inc_next[0] =
( (int64_t)layout.stride()[0] * problem_size.stride_w
//- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
) * element_size_bits / 8;
// next R
inc_next[1] =
( (int64_t)layout.stride()[1] * problem_size.stride_h
//- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
) * element_size_bits / 8;
// next K
inc_next[2] =
(
threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[2]
//- (problem_size.R * problem_size.S - 1) * layout.stride()[0]
//- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
) * element_size_bits / 8;
// offset in units of bytes to move the pointer in backward direction
reset_bytes = (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
* element_size_bits / 8;
filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv2d WGRAD Output Gradient (dy) iterator
struct Conv2dWgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
  int NPQ;                              // precomputed product of N*P*Q for clearing predicates
FastDivmod pq_divmod;
FastDivmod q_divmod;
int64_t offset_next_strided; // offset in units of bytes to next npq coordinate within tile
int64_t offset_next_contiguous; // offset in units of bytes to next k coordinate within tile
int64_t inc_next_npq; // offset in units of bytes to next npq position in subsequent tile
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
NPQ(problem_size.N * problem_size.P * problem_size.Q),
pq_divmod(problem_size.P * problem_size.Q),
q_divmod(problem_size.Q) {
TRACE_CONV_INITIALIZERS("conv2d_wgrad", "output_gradient",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
    // Incremental offsets in units of bytes: (number of elements) * sizeof_bits<Element>::value / 8
offset_next_strided = (threadmap_delta.strided() * (int64_t)layout.stride()[0])
* element_size_bits / 8;
offset_next_contiguous = (threadmap_delta.contiguous())
* element_size_bits / 8;
inc_next_npq = (threadblock_shape.column() * problem_size.split_k_slices * (int64_t)layout.stride()[0])
* element_size_bits / 8;
}
};
struct Conv2dWgradActivationIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
FastDivmod sc_divmod;
FastDivmod pq_divmod;
FastDivmod q_divmod;
FastDivmod c_divmod;
FastDivmod s_divmod;
int small_channel_conv_s_offset;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dWgradActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dWgradActivationIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
layout(layout),
sc_divmod(problem_size.S * problem_size.C),
pq_divmod(problem_size.P * problem_size.Q),
q_divmod(problem_size.Q),
c_divmod(problem_size.C),
s_divmod(problem_size.S * problem_size.dilation_w),
small_channel_conv_s_offset((problem_size.S - 1) * problem_size.dilation_w - problem_size.pad_w) {
}
CUTLASS_HOST_DEVICE
Conv2dWgradActivationIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
Conv2dWgradActivationIteratorOptimizedParams(
problem_size,
layout
) {
TRACE_CONV_INITIALIZERS("conv2d_wgrad", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
}
};
struct PredicatedScaleBiasVectorAccessIteratorParams {
public:
/// Default ctor
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIteratorParams() { }
// Default ctor
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIteratorParams(
Conv2dProblemSize const &problem_size,
layout::PitchLinear const &layout) {}
// Default ctor
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIteratorParams(
Conv2dProblemSize const &problem_size,
layout::RowMajor const &layout) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 30,197 | C | 32.778523 | 123 | 0.619863 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradFilterTileAccessIteratorOptimized;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorOptimized specialization for strided dgrad: handles general
// (non-unit) stride by stepping the filter position in increments of stride_h / stride_w
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Parameters structure
//
struct Params : Conv2dStridedDgradFilterIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dStridedDgradFilterIteratorOptimizedParams const &base):
Conv2dStridedDgradFilterIteratorOptimizedParams(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dStridedDgradFilterIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) { }
};
private:
Conv2dStridedDgradFilterIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_k_;
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int64_t reset_bytes_s_;
int64_t reset_bytes_r_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided *
ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized(
Conv2dStridedDgradFilterIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.strided();
Index column = threadblock_offset.column() + thread_coord.contiguous();
reset_bytes_s_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0];
reset_bytes_r_ = reset_bytes_s_ +
(problem_size_.num_gemm_k_filter_r(start_r_) - 1) * params_.inc_next[1];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided;
int filter_c = column + c * ThreadMap::Delta::kContiguous;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
uint32_t pred = ((filter_k < problem_size_.K && (filter_c + v * AccessType::kElements) < problem_size_.C) ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_[v] |= (pred << pred_idx);
}
}
}
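    // Predicate bit layout: bit (c + s * Iterations::kContiguous) of predicates_[v] guards the
    // access at contiguous iteration c, strided iteration s, vector index v; e.g., with
    // kContiguous == 2, bit 5 corresponds to c == 1, s == 2.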
TensorCoord coord{filter_k_, filter_r_, filter_s_, column};
pointer_ += params_.layout(coord) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_DEVICE
void advance() {
int next_idx = 0;
LongIndex reset_bytes = params_.reset_bytes;
// Move filter_s by stride_w
filter_s_ += problem_size_.stride_w;
if (filter_s_ >= problem_size_.S) {
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r by stride_h
filter_r_ += problem_size_.stride_h;
#if 0
bool check = (filter_r_ < problem_size_.R);
filter_r_ = check ? filter_r_ : start_r_;
next_idx = check ? 1 : 2;
reset_bytes += (check ? reset_bytes_s_ : reset_bytes_r_);
#else
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" .reg .s64 t1;\n\t"
" setp.lt.s32 %%p, %3, %4;\n\t"
" selp.s32 %0, %3, %5, %%p;\n\t"
" selp.s32 %1, 1, 2, %%p;\n\t"
" selp.s64 t1, %6, %7, %%p;\n\t"
" add.s64 %2, %8, t1;\n\t"
"}\n"
: "=r"(filter_r_), "=r"(next_idx), "=l"(reset_bytes)
: "r"(filter_r_), "r"(problem_size_.R), "r"(start_r_),
"l"(reset_bytes_s_), "l"(reset_bytes_r_), "l"(reset_bytes));
#endif
}
// offset pointers by offset_bytes
pointer_ += (params_.inc_next[next_idx] - reset_bytes);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
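        // e.g., with kContiguous == 2 and s == 1, kClearMask == 0b1100, clearing both contiguous
        // predicate bits that belong to strided row 1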
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
predicates_[v] = (predicates_[v] & (~kClearMask));
}
}
}
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_[iteration_vector_] & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_strided;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
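    // e.g., for 128-bit accesses of 16-bit elements AccessType::kElements == 8, so C must be a
    // multiple of 8 (illustrative; kElements is determined by the AccessType template argument)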
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorOptimized unity-stride specialization: more performant for
// dgrad on problem sizes with stride = {1x1}
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Parameters structure
//
struct Params : Conv2dDgradFilterIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dDgradFilterIteratorOptimizedParams const &base):
Conv2dDgradFilterIteratorOptimizedParams(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dDgradFilterIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) { }
};
private:
Conv2dDgradFilterIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_rs_;
int filter_k_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided *
ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized(
Conv2dDgradFilterIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_rs_(0),
filter_k_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.strided();
Index column = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided;
int filter_c = column + c * ThreadMap::Delta::kContiguous;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
uint32_t pred = ((filter_k < problem_size_.K && (filter_c + v * AccessType::kElements) < problem_size_.C) ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_[v] |= (pred << pred_idx);
}
}
}
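    // Offset the pointer to this thread's initial (k, c) coordinate, converting elements to
    // bytes; layout.stride()[2] is the distance in elements between consecutive filter k indices.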
pointer_ += (
filter_k_ * params.layout.stride()[2] + column
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_rs;
// moves to the next tile
++filter_rs_;
if (filter_rs_ == params_.RS) {
filter_rs_ = 0;
next = params_.inc_next_k;
filter_k_ += params_.filter_k_delta;
}
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) {
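        // Mask off every contiguous-iteration bit belonging to strided row s, since filter_k_
        // for that row now lies outside [0, K).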
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
predicates_[v] = (predicates_[v] & (~kClearMask));
}
}
}
pointer_ += next;
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_[iteration_vector_] & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_strided;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
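//
// Usage sketch (illustrative only; not part of the original header). The tile access iterators
// above are typically driven by a predicated load loop of the following shape, where `iterator`
// is an instance of one of the iterators in this file and `frag_ptr` points at the destination
// fragment reinterpreted as AccessType. These names are assumptions about the caller, not
// definitions made here.
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int idx = 0; idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++idx) {
//     iterator.set_iteration_index(idx);
//     if (iterator.valid()) {
//       frag_ptr[idx] = *iterator.get();  // load guarded by the packed predicate bits
//     }
//     ++iterator;                         // advances vector -> contiguous -> strided
//   }
//   iterator.advance();                   // then step to the next filter (r, s) position / K tile
//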
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 19,735 | C | 30.832258 | 126 | 0.64829 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/device/direct_convolution.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for device-level Depthwise Convolution
*/
#pragma once
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/conv/convolution.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename DirectConvolutionKernel_>
class DirectConvolution {
public:
using UnderlyingKernel = DirectConvolutionKernel_;
using ElementA = typename UnderlyingKernel::ElementA;
using LayoutA = typename UnderlyingKernel::LayoutA;
using ElementB = typename UnderlyingKernel::ElementB;
using LayoutB = typename UnderlyingKernel::LayoutB;
using ElementC = typename UnderlyingKernel::ElementC;
using LayoutC = typename UnderlyingKernel::LayoutC;
using ElementAccumulator = typename UnderlyingKernel::ElementAccumulator;
using ElementCompute = typename UnderlyingKernel::ElementCompute;
using OperatorClass = typename UnderlyingKernel::OperatorClass;
using ArchTag = typename UnderlyingKernel::ArchTag;
using ThreadblockShape = typename UnderlyingKernel::ThreadblockShape;
using WarpShape = typename UnderlyingKernel::WarpShape;
using InstructionShape = typename UnderlyingKernel::InstructionShape;
using ThreadblockSwizzle = typename UnderlyingKernel::ThreadblockSwizzle;
using EpilogueOutputOp = typename UnderlyingKernel::EpilogueOutputOp;
static int const kStages = UnderlyingKernel::kStages;
static int const kConvDim = UnderlyingKernel::kConvDim;
using WarpMmaOperator = typename UnderlyingKernel::WarpMmaOperator;
using ArchMmaOperator = typename UnderlyingKernel::ArchMmaOperator;
using MathOperator = typename UnderlyingKernel::MathOperator;
static cutlass::conv::Operator const kConvolutionalOperator = UnderlyingKernel::kConvolutionalOperator;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = UnderlyingKernel::kIteratorAlgorithm;
static cutlass::conv::StrideSupport const kStrideSupport = UnderlyingKernel::kStrideSupport;
static cutlass::conv::GroupMode const kGroupMode = UnderlyingKernel::kGroupMode;
static int const kWarpCount =
(ThreadblockShape::kM / WarpShape::kM) *
(ThreadblockShape::kN / WarpShape::kN) *
(ThreadblockShape::kK / WarpShape::kK);
/// Argument structure
using Arguments = typename UnderlyingKernel::Arguments;
using ReorderKernel = typename UnderlyingKernel::ReorderKernel;
private:
/// Kernel parameters object
typename UnderlyingKernel::Params params_;
public:
/// Constructs Implicit GEMM
DirectConvolution() { }
/// Determines whether the Implicit GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
// dispatch to iterators
Status status = UnderlyingKernel::Mma::IteratorA::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
status = UnderlyingKernel::Mma::IteratorB::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
if (kGroupMode != conv::GroupMode::kDepthwise) {
return Status::kErrorInvalidProblem;
}
    // For depthwise convolution, at least one of K and C must equal the number of groups
if (args.problem_size.K != args.problem_size.groups &&
args.problem_size.C != args.problem_size.groups) {
return Status::kErrorInvalidProblem;
}
static int const kAlignmentC = UnderlyingKernel::Epilogue::OutputTileIterator::kElementsPerAccess;
if (kConvolutionalOperator == conv::Operator::kFprop) {
if (args.problem_size.K % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kDgrad) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kWgrad) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
}
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(
threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices));
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max())) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return 0;
}
/// Initializes GEMM state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
// initialize the params structure from the arguments
params_ = typename UnderlyingKernel::Params(
args,
static_cast<int *>(workspace)
);
int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
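      // Requesting more than 48 KB of dynamic shared memory requires an explicit opt-in via
      // cudaFuncSetAttribute on architectures that support it.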
cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Initializes GEMM state from arguments.
Status update(Arguments const &args, void *workspace = nullptr) {
// update the params structure from the arguments
params_.ptr_A = args.ref_A.data();
params_.ptr_B = args.ref_B.data();
params_.ptr_C = args.ref_C.data();
params_.ptr_D = args.ref_D.data();
params_.output_op = args.output_op;
    params_.ptr_reordered_B = args.ref_reordered_B.data();
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
// Launch reorder kernel
if (params_.ptr_reordered_B != nullptr) {
dim3 grid = ReorderKernel::get_grid_shape(params_);
dim3 block = ReorderKernel::get_block_shape();
cutlass::Kernel<ReorderKernel><<<grid, block, 0, stream>>>(params_);
}
// Launch main kernel
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(32 * kWarpCount, 1, 1);
// Dynamic SMEM size based on input params.
int smem_size = int(params_.get_smem_size());
// Make sure we can use that much shared memory.
cudaError_t status =
cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
if (status != cudaSuccess)
return Status::kErrorInternal;
cutlass::Kernel<UnderlyingKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
int get_smem_size() { return int(params_.get_smem_size()); }
};
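//
// Usage sketch (illustrative only; not part of the original header). `DepthwiseKernel` below is a
// placeholder for a depthwise direct-convolution kernel type composed elsewhere in CUTLASS; the
// argument list is elided.
//
//   using Conv = cutlass::conv::device::DirectConvolution<DepthwiseKernel>;
//
//   Conv conv_op;
//   typename Conv::Arguments args(/* problem size, tensor refs, epilogue params, ... */);
//
//   cutlass::Status status = Conv::can_implement(args);
//   if (status == cutlass::Status::kSuccess) {
//     status = conv_op.initialize(args);   // no workspace needed: get_workspace_size() returns 0
//   }
//   if (status == cutlass::Status::kSuccess) {
//     status = conv_op();                  // launches the filter-reorder kernel (if any), then the main kernel
//   }
//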
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,744 | C | 35.092592 | 120 | 0.67149 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/device/implicit_gemm_convolution.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for device-level Implicit GEMM Convolution
*/
#pragma once
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/conv/convolution.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename ImplicitGemmKernel_>
class ImplicitGemmConvolution {
public:
using UnderlyingKernel = ImplicitGemmKernel_;
using ElementA = typename UnderlyingKernel::ElementA;
using LayoutA = typename UnderlyingKernel::LayoutA;
using ElementB = typename UnderlyingKernel::ElementB;
using LayoutB = typename UnderlyingKernel::LayoutB;
using ElementC = typename UnderlyingKernel::ElementC;
using LayoutC = typename UnderlyingKernel::LayoutC;
using ElementAccumulator = typename UnderlyingKernel::ElementAccumulator;
using ElementCompute = typename UnderlyingKernel::ElementCompute;
using OperatorClass = typename UnderlyingKernel::OperatorClass;
using ArchTag = typename UnderlyingKernel::ArchTag;
using ThreadblockShape = typename UnderlyingKernel::ThreadblockShape;
using WarpShape = typename UnderlyingKernel::WarpShape;
using InstructionShape = typename UnderlyingKernel::InstructionShape;
using ThreadblockSwizzle = typename UnderlyingKernel::ThreadblockSwizzle;
using EpilogueOutputOp = typename UnderlyingKernel::EpilogueOutputOp;
static int const kStages = UnderlyingKernel::kStages;
static int const kConvDim = UnderlyingKernel::kConvDim;
using WarpMmaOperator = typename UnderlyingKernel::WarpMmaOperator;
using ArchMmaOperator = typename UnderlyingKernel::ArchMmaOperator;
using MathOperator = typename UnderlyingKernel::MathOperator;
static cutlass::conv::Operator const kConvolutionalOperator = UnderlyingKernel::kConvolutionalOperator;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = UnderlyingKernel::kIteratorAlgorithm;
static cutlass::conv::StrideSupport const kStrideSupport = UnderlyingKernel::kStrideSupport;
static cutlass::conv::GroupMode const kGroupMode = UnderlyingKernel::kGroupMode;
static int const kWarpCount =
(ThreadblockShape::kM / WarpShape::kM) *
(ThreadblockShape::kN / WarpShape::kN) *
(ThreadblockShape::kK / WarpShape::kK);
/// Argument structure
using Arguments = typename UnderlyingKernel::Arguments;
private:
/// Kernel parameters object
typename UnderlyingKernel::Params params_;
public:
/// Constructs Implicit GEMM
ImplicitGemmConvolution() { }
/// Determines whether the Implicit GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
// dispatch to iterators
Status status = UnderlyingKernel::Mma::IteratorA::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
status = UnderlyingKernel::Mma::IteratorB::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
// check group conv constraint
if (args.problem_size.groups != 1) {
if (kGroupMode == conv::GroupMode::kNone) {
return Status::kErrorInvalidProblem;
}
// C and K should be multiple of groups
if (args.problem_size.K % args.problem_size.groups ||
args.problem_size.C % args.problem_size.groups) {
return Status::kErrorInvalidProblem;
}
// split-k is not supported
if (args.problem_size.split_k_slices != 1) {
return Status::kErrorInvalidProblem;
}
int k_per_group = args.problem_size.K / args.problem_size.groups;
      // k_per_group should be a multiple of ThreadblockShape::kN; one CTA calculates one group
if (kGroupMode == conv::GroupMode::kSingleGroup && k_per_group % ThreadblockShape::kN) {
return Status::kErrorInvalidProblem;
}
      // ThreadblockShape::kN should be divisible by k_per_group; one CTA calculates multiple groups
if (kGroupMode == conv::GroupMode::kMultipleGroup && ThreadblockShape::kN % k_per_group) {
return Status::kErrorInvalidProblem;
}
// current optimized iterator algo only supports SingleGroup mode
if (kIteratorAlgorithm == IteratorAlgorithm::kOptimized &&
kGroupMode != conv::GroupMode::kSingleGroup) {
return Status::kErrorInvalidProblem;
}
}
static int const kAlignmentC = UnderlyingKernel::Epilogue::OutputTileIterator::kElementsPerAccess;
if (kConvolutionalOperator == conv::Operator::kFprop) {
if (args.problem_size.K % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kDgrad) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kWgrad) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
}
// check for unsupported problem sizes for strided dgrad implementation
if (kConvolutionalOperator == conv::Operator::kDgrad &&
kStrideSupport == conv::StrideSupport::kStrided) {
      // Unity stride (1x1) is supported by strided dgrad but disabled for performance
      // reasons. For unity stride, use the strided dgrad optimized unity-stride specialization.
      // Note that unit tests exercise strided dgrad with unity stride to make sure the strided
      // dgrad implementation is functionally sound.
      // The strided dgrad implementation also supports mixed strides, i.e., (1x2) and (2x1).
if(args.problem_size.stride_h == 1 && args.problem_size.stride_w == 1) {
return Status::kErrorNotSupported;
}
// split-k (serial or parallel) is not supported for strided dgrad
if(args.problem_size.split_k_slices > 1) {
return Status::kErrorNotSupported;
}
// dilation > {1x1} is not supported for strided dgrad
if(args.problem_size.dilation_h > 1 || args.problem_size.dilation_w > 1) {
return Status::kErrorNotSupported;
}
}
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(
threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices));
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max())) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t workspace_bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices);
if(args.split_k_mode == SplitKMode::kParallel) {
      // Split-K parallel: CTAs in the k-dimension write partial results to a temporary workspace.
      // The user needs to call a reduction operator to obtain the final output tensor.
workspace_bytes =
sizeof(ElementAccumulator) *
size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size)) *
size_t(grid_tiled_shape.k());
}
else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size.split_k_slices > 1) {
      // Split-K serial: The user workspace is used to store a semaphore that serializes writing
      // the final reduced output to the user's output tensor.
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
return workspace_bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
if (args.problem_size.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream);
if (status != cudaSuccess) {
return Status::kErrorInternal;
}
}
// initialize the params structure from the arguments
params_ = typename UnderlyingKernel::Params(
args,
static_cast<int *>(workspace)
);
int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Initializes GEMM state from arguments.
Status update(Arguments const &args, void *workspace = nullptr) {
// update the params structure from the arguments
params_.ptr_A = args.ref_A.data();
params_.ptr_B = args.ref_B.data();
params_.ptr_C = args.ref_C.data();
params_.ptr_D = args.ref_D.data();
params_.output_op = args.output_op;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(32 * kWarpCount, 1, 1);
int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage));
cutlass::Kernel<UnderlyingKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
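//
// Usage sketch (illustrative only; not part of the original header), showing how the workspace
// query is typically combined with split-K. `ImplicitGemmKernel` and the device allocation helper
// are placeholders; `cutlass::device_memory::allocation` lives in the CUTLASS utilities.
//
//   using Conv = cutlass::conv::device::ImplicitGemmConvolution<ImplicitGemmKernel>;
//
//   Conv conv_op;
//   typename Conv::Arguments args(/* problem size, tensor refs, epilogue params, split-K mode, ... */);
//
//   size_t workspace_bytes = Conv::get_workspace_size(args);  // non-zero for split-K serial/parallel
//   cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);
//
//   cutlass::Status status = Conv::can_implement(args);
//   if (status == cutlass::Status::kSuccess) {
//     status = conv_op.initialize(args, workspace.get());     // zero-fills the workspace when split_k_slices > 1
//   }
//   if (status == cutlass::Status::kSuccess) {
//     status = conv_op();
//   }
//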
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 12,619 | C | 36.337278 | 106 | 0.669387 |