// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_trmm.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
//
/*! \file
\brief
Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_trmm.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode SideMode_,
/// Fill Mode for the triangular matrix
FillMode FillMode_,
/// Diag Type for the triangular matrix
DiagType DiagType_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultTrmm;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultTrmm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
kSideMode, kFillMode, kDiagType, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
/// Define the threadblock-scoped triangular matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultTrmm<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
kSideMode, kFillMode, kDiagType,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level TRMM operator.
using TrmmKernel = kernel::TrmmUniversal<Mma, Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultTrmm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
kSideMode, kFillMode, kDiagType, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
/// Define the threadblock-scoped triangular matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultTrmm<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
kSideMode, kFillMode, kDiagType,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level TRMM operator.
using TrmmKernel = kernel::TrmmUniversal<Mma, Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
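
/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged usage sketch: one possible way to instantiate DefaultTrmm and obtain the kernel-level
// TRMM type. Every concrete choice below (element types, layouts, alignments, tile shapes,
// epilogue, stage count, math operator) is an illustrative assumption rather than a
// configuration prescribed by this header, and the usual CUTLASS headers (e.g.
// cutlass/numeric_types.h) are assumed to be included.
using ExampleTrmmKernel = cutlass::gemm::kernel::DefaultTrmm<
    cutlass::half_t, cutlass::layout::RowMajor, 8,        // A: element, layout, alignment
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,     // B: element, layout, alignment
    cutlass::SideMode::kLeft,                             // A is the (left) triangular operand
    cutlass::FillMode::kLower,                            // lower-triangular fill
    cutlass::DiagType::kNonUnit,                          // diagonal is stored explicitly
    float, cutlass::layout::RowMajor,                     // C/D: element, layout
    float,                                                // accumulator type
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,  // Tensor Cores, Ampere tuning
    cutlass::gemm::GemmShape<128, 128, 32>,               // threadblock tile
    cutlass::gemm::GemmShape<64, 64, 32>,                 // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,                  // instruction shape
    cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                                    // mainloop stages
    false,                                                // SplitKSerial
    cutlass::arch::OpMultiplyAdd>::TrmmKernel;
/////////////////////////////////////////////////////////////////////////////////////////////////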

// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Transpositions for Rank2K problems.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
ComplexTransform TransformA,
int AlignmentA,
typename ElementB_,
typename LayoutB_,
ComplexTransform TransformB,
int AlignmentB,
typename LayoutC_,
FillMode FillModeC_,
bool Transpose
>
struct Rank2KMapArguments {
using ElementA = ElementA_;
using LayoutA = LayoutA_;
static ComplexTransform const kTransformA = TransformA;
static int const kAlignmentA = AlignmentA;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
static ComplexTransform const kTransformB = TransformB;
static int const kAlignmentB = AlignmentB;
using LayoutC = LayoutC_;
static FillMode const kFillModeC = FillModeC_;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
ComplexTransform TransformA,
int AlignmentA,
typename ElementB_,
typename LayoutB_,
ComplexTransform TransformB,
int AlignmentB,
typename LayoutC_,
FillMode FillModeC_
>
struct Rank2KMapArguments<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
LayoutC_,
FillModeC_,
true
> {
using ElementA = ElementB_;
using LayoutA = LayoutB_;
static ComplexTransform const kTransformA = TransformB;
static int const kAlignmentA = AlignmentB;
using ElementB = ElementA_;
using LayoutB = LayoutA_;
static ComplexTransform const kTransformB = TransformA;
static int const kAlignmentB = AlignmentA;
using LayoutC = typename layout::LayoutTranspose<LayoutC_>::type;
static FillMode const kFillModeC = InvertFillMode<FillModeC_>::mode;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
}
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
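
/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged illustration of the mapping above: with Transpose == true, the A/B element types,
// layouts, complex transforms, and alignments are exchanged, the C layout is transposed
// (ColumnMajor becomes RowMajor), and the fill mode is inverted. The concrete element types,
// layouts, transforms, and alignments chosen here are assumptions made purely for illustration.
using ExampleMappedArguments = cutlass::gemm::kernel::detail::Rank2KMapArguments<
    double, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone,      1,
    float,  cutlass::layout::RowMajor,    cutlass::ComplexTransform::kConjugate, 4,
    cutlass::layout::ColumnMajor, cutlass::FillMode::kLower,
    /*Transpose=*/ true>;

static_assert(ExampleMappedArguments::kAlignmentA == 4,
              "Operand A inherits the original operand B alignment");
static_assert(ExampleMappedArguments::kTransformA == cutlass::ComplexTransform::kConjugate,
              "Operand A inherits the original operand B transform");
static_assert(ExampleMappedArguments::kFillModeC == cutlass::FillMode::kUpper,
              "kLower is inverted to kUpper for the transposed problem");
/////////////////////////////////////////////////////////////////////////////////////////////////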

// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_2k_universal.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Kernel-level Rank 2K update (SYR2K / HER2K) supporting the universal GEMM modes
      (standard, batched, array, and split-K).
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T)
typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper)
BlasMode BlasMode_ ///! Blas3 computation mode
>
struct Rank2KUniversal {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma1::IteratorA::Element;
using ElementB = typename Mma1::IteratorB::Element;
// Mma1 (A x B^T)
using LayoutA = typename Mma1::IteratorA::Layout;
using LayoutBT = typename Mma1::IteratorB::Layout;
static ComplexTransform const kMma1TransformA = Mma1::kTransformA;
static ComplexTransform const kMma1TransformB = Mma1::kTransformB;
// Mma2 (B x A^T)
using LayoutB = typename Mma2::IteratorA::Layout;
using LayoutAT = typename Mma2::IteratorB::Layout;
static ComplexTransform const kMma2TransformA = Mma2::kTransformA;
static ComplexTransform const kMma2TransformB = Mma2::kTransformB;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
// Output related typedefinitions
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static FillMode const kFillModeC = FillModeC_;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static BlasMode const kBlasMode = BlasMode_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr) { }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) {
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
// Mma1 Iterator A and B params
typename Mma1::IteratorA::Params params_A;
typename Mma1::IteratorB::Params params_BT;
// Mma2 Iterator A and B params
typename Mma2::IteratorA::Params params_B;
typename Mma2::IteratorB::Params params_AT;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A(0),
params_BT(0),
params_B(0),
params_AT(0),
params_C(0),
params_D(0),
batch_count(0),
gemm_k_size(0),
mode(cutlass::gemm::GemmUniversalMode::kGemm),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
batch_stride_A(0),
batch_stride_B(0),
batch_stride_C(0),
batch_stride_D(0),
semaphore(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(args.lda),
params_BT(args.ldb),
params_B(args.ldb),
params_AT(args.lda),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
Rank2KUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Early exit if Fill Mode is Lower and
// if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal)
if (kFillModeC == cutlass::FillMode::kLower &&
(threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) {
return;
}
// Early exit if Fill Mode is Upper and
// if the entire tile is below the main diagonal (top-right corner is at or below the diagonal)
if (kFillModeC == cutlass::FillMode::kUpper &&
threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
return;
}
bool tile_on_diagonal = false;
// Mark tiles that are being crossed by the main diagonal
// (top-right and bottom-left corners are on either side of the diagonal)
if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN
&& threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
tile_on_diagonal = true;
}
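// Worked example: with square 64x64 threadblock tiles and a lower-fill C, tile (m=1, n=3)
// satisfies (1+1)*64 <= 3*64 and is skipped entirely, whereas tile (m=2, n=2) fails both
// early-exit tests, is crossed by the main diagonal, and is therefore masked per-element in
// the epilogue via tile_on_diagonal.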
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma1::IteratorB iterator_BT(
params.params_BT,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_B(
params.params_B,
ptr_B,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma2::IteratorB iterator_AT(
params.params_AT,
ptr_A,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1 (A x BT)
Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2 (B x AT)
Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
// Compute threadblock-scoped matrix multiply-add (A x BT)
mma1(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_BT,
accumulators);
// The HER2K kernel needs alpha to be complex, and conj(alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// If CTA not on diagonal, FillMode doesn't apply.
FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
__syncthreads();
accumulators.clear();
}
// Compute threadblock-scoped matrix multiply-add (B x AT)
mma2(
gemm_k_iterations,
accumulators,
iterator_B,
iterator_AT,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
/* Needed for HER2K where the second HERK is multiplied by conj(alpha) */
typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1);
EpilogueOutputOp output_op_her2k(second_her2k_params);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
// The HER2K kernel needs alpha to be complex, and conj(alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
ptr_C = static_cast<ElementC *>(params.ptr_D);
}
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
if (kBlasMode == BlasMode::kSymmetric) {
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
} else {
output_op_her2k.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// If CTA not on diagonal, FillMode doesn't apply.
FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
if (kBlasMode == BlasMode::kSymmetric) {
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
} else {
epilogue(
output_op_her2k,
iterator_D,
accumulators,
iterator_C);
}
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
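
/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged illustration of the alignment requirement enforced by Rank2KUniversal::can_implement()
// above: every problem extent touched by an operand must be divisible by that operand's access
// width. The free-function form, its name, and the idea of passing alignments at run time are
// assumptions made for illustration; the kernel derives the alignments from its iterator types.
inline bool rank2k_problem_is_aligned(cutlass::gemm::GemmCoord const &problem_size,
                                      int alignment_A, int alignment_B, int alignment_C) {
  return (problem_size.m() % alignment_A == 0) && (problem_size.k() % alignment_A == 0) &&
         (problem_size.n() % alignment_B == 0) && (problem_size.k() % alignment_B == 0) &&
         (problem_size.m() % alignment_C == 0) && (problem_size.n() % alignment_C == 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////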

// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_splitk_parallel.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for GEMM performing a reduction over K partitions in parallel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmSplitKParallel {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
static int const kAlignmentK = Mma::Operator::Shape::kK;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int64_t splitk_slice_stride;
int gemm_k_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename OutputOp::Params output_op,
int64_t splitk_slice_stride
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
splitk_slice_stride(splitk_slice_stride) {
int full_gemm_k_iterations = problem_size.k() / Mma::Shape::kK;
int gemm_k_iterations = full_gemm_k_iterations / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
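// Worked example: for problem_size.k() = 4096, Mma::Shape::kK = 32, and grid_tiled_shape.k()
// = 4 split-K partitions, full_gemm_k_iterations = 128, gemm_k_iterations = 32, and
// gemm_k_size = 1024 K-columns per partition; the final partition absorbs any remainder in
// operator().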
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
GemmSplitKParallel() { }
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k;
if (threadblock_tile_offset.k() + 1 == params.grid_tiled_shape.k()) {
problem_size_k = params.problem_size.k();
}
else {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
int warp_idx = threadIdx.x / 32;
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
iterator_D.add_pointer_offset(params.splitk_slice_stride * threadblock_tile_offset.k());
// Execute the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Run efficient epilogue
epilogue(output_op, iterator_D, accumulators, iterator_D);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
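
/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged illustration of how GemmSplitKParallel::operator() above bounds each split-K
// partition's K range: every partition except the last covers exactly gemm_k_size columns of K,
// and the last partition runs to the end of the problem so no remainder is dropped. The helper
// name and its free-function form are assumptions for illustration; the kernel computes the
// same quantity inline from Params and the threadblock tile offset.
inline int splitk_partition_k_end(int k_partition,      // threadblock_tile_offset.k()
                                  int num_partitions,   // grid_tiled_shape.k()
                                  int gemm_k_size,      // K columns per partition (see Params)
                                  int problem_k) {      // problem_size.k()
  return (k_partition + 1 == num_partitions) ? problem_k
                                             : (k_partition + 1) * gemm_k_size;
}
/////////////////////////////////////////////////////////////////////////////////////////////////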

// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_trmm_complex.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_multistage_trmm_complex.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Side Mode for the kernel
SideMode SideMode_,
/// Fill Mode for the triangular matrix
FillMode FillMode_,
/// Diag Type for the triangular matrix
DiagType DiagType_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Multiply-add operator
// (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial
>
struct DefaultTrmmComplex;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Multiply-add operator
// (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial
>
struct DefaultTrmmComplex<
ElementA, LayoutA, ElementB, LayoutB,
kSideMode, kFillMode, kDiagType,
ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutA, ElementB, LayoutB,
kSideMode, kFillMode, kDiagType,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape,
WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator>::Epilogue;
/// Define the kernel-level TRMM operator.
using TrmmKernel = kernel::TrmmUniversal<Mma, Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Multiply-add operator
// (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial
>
struct DefaultTrmmComplex<
ElementA, LayoutA, ElementB, LayoutB,
kSideMode, kFillMode, kDiagType,
ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutA, ElementB, LayoutB,
kSideMode, kFillMode, kDiagType,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape,
WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator>::Epilogue;
/// Define the kernel-level TRMM operator.
using TrmmKernel = kernel::TrmmUniversal<Mma, Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
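
/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged usage sketch: one possible complex-valued instantiation of DefaultTrmmComplex. Every
// concrete choice below (element types, layouts, tile shapes, transforms, epilogue, stage
// count) is an illustrative assumption rather than a configuration prescribed by this header,
// and cutlass/complex.h plus the other usual CUTLASS headers are assumed to be included.
using ExampleComplexTrmmKernel = cutlass::gemm::kernel::DefaultTrmmComplex<
    cutlass::complex<float>, cutlass::layout::ColumnMajor,    // A
    cutlass::complex<float>, cutlass::layout::ColumnMajor,    // B
    cutlass::SideMode::kLeft, cutlass::FillMode::kUpper, cutlass::DiagType::kUnit,
    cutlass::complex<float>, cutlass::layout::RowMajor,       // C/D
    cutlass::complex<float>,                                   // accumulator type
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<64, 64, 16>,                     // threadblock tile
    cutlass::gemm::GemmShape<32, 32, 16>,                     // warp tile
    cutlass::gemm::GemmShape<16, 8, 8>,                       // instruction shape
    cutlass::epilogue::thread::LinearCombination<
        cutlass::complex<float>, 1, cutlass::complex<float>, cutlass::complex<float>>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                                         // mainloop stages
    cutlass::ComplexTransform::kNone,                          // transform applied to A
    cutlass::ComplexTransform::kNone,                          // transform applied to B
    cutlass::arch::OpMultiplyAddComplex,
    false>::TrmmKernel;                                        // SplitKSerial
/////////////////////////////////////////////////////////////////////////////////////////////////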

// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_complex.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Multiply-add operator
// (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial
>
struct DefaultGemmComplex;
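//
// Editorial usage sketch (not part of the original header): each partial specialization
// below is selected on (LayoutC, OperatorClass, ArchTag) and exposes the composed kernel
// through the nested `GemmKernel` alias. Concrete argument choices are application-specific;
// the parameter names here simply mirror the declaration above.
//
//   using Kernel = typename cutlass::gemm::kernel::DefaultGemmComplex<
//       ElementA, LayoutA, ElementB, LayoutB,
//       ElementC, cutlass::layout::RowMajor,
//       ElementAccumulator, OperatorClass, ArchTag,
//       ThreadblockShape, WarpShape, InstructionShape,
//       EpilogueOutputOp, ThreadblockSwizzle, Stages,
//       TransformA, TransformB, Operator, SplitKSerial>::GemmKernel;
//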
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Multiply-add operator
// (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial
>
struct DefaultGemmComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator,
layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape,
WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT (Sm50)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Multiply-add operator
// (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial
>
struct DefaultGemmComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassSimt,
arch::Sm50, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
/// Define the threadblock-scoped matrix multiply-accumulate
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape,
WarpShape,
InstructionShape,
ElementA, LayoutA,
ElementB, LayoutB,
ElementAccumulator, layout::RowMajor,
arch::OpClassSimt,
Stages,
Operator,
false,
cutlass::arch::CacheOperation::Global,
cutlass::arch::CacheOperation::Global,
TransformA,
TransformB
>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1,
typename MmaCore::IteratorThreadMapA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0,
typename MmaCore::IteratorThreadMapB>;
// Define the threadblock-scoped pipelined matrix multiply
using Mma = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Multiply-add operator
// (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial
>
struct DefaultGemmComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator,
layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape,
WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
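//
// Illustrative instantiation of the Sm80 tensor-op specialization above (editorial
// addition). The complex<float> element types, tile shapes, stage count, and the
// LinearCombination epilogue are assumptions chosen only to make the example concrete;
// cutlass::complex requires "cutlass/complex.h", and valid shape combinations are
// ultimately determined by DefaultMultistageMmaComplex.
//
//   using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmComplex<
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,     // A
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,     // B
//       cutlass::complex<float>, cutlass::layout::RowMajor,        // C / D
//       cutlass::complex<float>,                                    // accumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<64, 64, 16>,                       // threadblock tile
//       cutlass::gemm::GemmShape<32, 32, 16>,                       // warp tile
//       cutlass::gemm::GemmShape<16, 8, 8>,                         // instruction tile
//       cutlass::epilogue::thread::LinearCombination<
//           cutlass::complex<float>, 1, cutlass::complex<float>, cutlass::complex<float>>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                           // stages
//       cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone,
//       cutlass::arch::OpMultiplyAddComplex,
//       false>::GemmKernel;                                          // SplitKSerial
//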
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture (SIMT)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Multiply-add operator
// (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial
>
struct DefaultGemmComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassSimt,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator,
layout::RowMajor, arch::OpClassSimt, arch::Sm80, ThreadblockShape,
WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 16,130 | C | 38.82963 | 100 | 0.702666 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_ell_gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Default kernel-level Blocked-ELL sparse GEMM operators.
    These operators combine threadblock-scoped ELL MMA
    with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
#include "cutlass/gemm/kernel/ell_gemm.h"
#include "cutlass/gemm/threadblock/default_ell_mma.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm;
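//
// Editorial usage sketch (not part of the original header): the specializations below
// each expose the Blocked-ELL kernel through the nested `GemmKernel` alias, following
// the parameter order of the declaration above; `IsASparse` indicates whether A (true)
// or B (false) is the Blocked-ELL operand.
//
//   using Kernel = typename cutlass::gemm::kernel::DefaultEllGemm<
//       ElementA, LayoutA, kAlignmentA,
//       ElementB, LayoutB, kAlignmentB,
//       ElementC, LayoutC, ElementAccumulator,
//       OperatorClass, ArchTag,
//       ThreadblockShape, WarpShape, InstructionShape,
//       EpilogueOutputOp, ThreadblockSwizzle, Stages,
//       SplitKSerial, Operator, IsASparse>::GemmKernel;
//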
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator, IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
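//
// Illustrative instantiation of the Sm80 tensor-op path above (editorial addition).
// The half-precision element types, 8-element (128-bit) alignment, tile shapes, and
// epilogue below are assumptions chosen only to make the example concrete.
//
//   using EllGemmKernel = typename cutlass::gemm::kernel::DefaultEllGemm<
//       cutlass::half_t, cutlass::layout::RowMajor, 8,               // A (Blocked-ELL)
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,            // B (dense)
//       cutlass::half_t, cutlass::layout::RowMajor,                  // C / D
//       float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,
//       cutlass::gemm::GemmShape<64, 64, 32>,
//       cutlass::gemm::GemmShape<16, 8, 16>,
//       cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                           // stages
//       false,                                                       // SplitKSerial
//       cutlass::arch::OpMultiplyAdd,
//       true>::GemmKernel;                                           // IsASparse
//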
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape,
WarpShape,
InstructionShape,
2,
Operator
>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Number of Interleaved k
int InterleavedK,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm<
ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA,
ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC,
layout::ColumnMajorInterleaved<InterleavedK>, int32_t,
arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape,
InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages,
SplitKSerial, Operator, IsASparse> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages, Operator,
true>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
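//
// Editorial sketch of the interleaved integer path above. With InterleavedK = 32 the
// operands are int8_t tiles in ColumnMajorInterleaved<32> (A) and RowMajorInterleaved<32>
// (B), C/D use ColumnMajorInterleaved<32>, and accumulation is fixed to int32_t by this
// specialization. The alignments, tile shapes, clamped epilogue functor (defined in a
// header this file does not include), and saturating multiply-add operator below are
// assumptions for illustration only.
//
//   using InterleavedKernel = typename cutlass::gemm::kernel::DefaultEllGemm<
//       int8_t, cutlass::layout::ColumnMajorInterleaved<32>, 16,
//       int8_t, cutlass::layout::RowMajorInterleaved<32>, 16,
//       int8_t, cutlass::layout::ColumnMajorInterleaved<32>,
//       int32_t, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 64>,
//       cutlass::gemm::GemmShape<64, 64, 64>,
//       cutlass::gemm::GemmShape<16, 8, 32>,
//       cutlass::epilogue::thread::LinearCombinationClamp<int8_t, 8, int32_t, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3, false, cutlass::arch::OpMultiplyAddSaturate, true>::GemmKernel;
//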
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of Interleaved k
int InterleavedK,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse>
struct DefaultEllGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
kAlignmentA, ElementB,
layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
ElementC, layout::ColumnMajorInterleaved<InterleavedK>,
int32_t, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, true>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Volta architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm70,
ThreadblockShape,
WarpShape,
GemmShape<8, 8, 4>,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm70,
ThreadblockShape,
WarpShape,
GemmShape<8, 8, 4>,
2,
Operator
>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
layout::RowMajor,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
arch::Sm50,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
2,
Operator>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture (SIMT)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages
int Stages,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
layout::RowMajor,
ElementAccumulator,
arch::OpClassSimt,
arch::Sm80,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassSimt, arch::Sm80,
ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, Stages,
Operator>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT DP4A
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Layout type for C matrix operand
typename LayoutC,
/// Element type for C and D matrix operands
typename ElementC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt,
ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>,
EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial,
Operator, IsASparse> {
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using ElementB = int8_t;
using OperatorClass = arch::OpClassSimt;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
arch::OpClassSimt,
arch::Sm50,
ThreadblockShape,
WarpShape,
InstructionShape,
2,
Operator
>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
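//
// Illustrative instantiation of the SIMT DP4A path above (editorial addition). The
// instruction shape is fixed to GemmShape<1, 1, 4>, i.e. one four-element int8 dot
// product per SIMT instruction, and the epilogue vector width must be 1 as enforced by
// the static_assert in the specialization. The layouts, tile shapes, and the Sm61 arch
// tag below are assumptions for illustration only.
//
//   using Dp4aKernel = typename cutlass::gemm::kernel::DefaultEllGemm<
//       int8_t, cutlass::layout::ColumnMajor, 4,
//       int8_t, cutlass::layout::RowMajor, 4,
//       int32_t, cutlass::layout::RowMajor,
//       int32_t, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
//       cutlass::gemm::GemmShape<128, 128, 32>,
//       cutlass::gemm::GemmShape<32, 64, 32>,
//       cutlass::gemm::GemmShape<1, 1, 4>,
//       cutlass::epilogue::thread::LinearCombination<int32_t, 1, int32_t, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       2, false, cutlass::arch::OpMultiplyAdd, true>::GemmKernel;
//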
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Wmma Gemm Kernel
template <
    /// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Sparse matrix is A or not
bool IsASparse
>
struct DefaultEllGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, LayoutC,
ElementAccumulator,
arch::OpClassWmmaTensorOp,
ArchTag,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
IsASparse> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWmmaTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 29,360 | C | 34.036993 | 100 | 0.678134 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_rank_2k_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/rank_2k_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op_blas3.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric>
struct DefaultRank2KComplex;
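//
// Editorial usage sketch (not part of the original header): the specializations below
// expose the kernel through the nested `Rank2Kkernel` alias. `FillModeC` selects the
// stored triangle of C, and the trailing `BlasMode` selects a symmetric (SYR2K-style)
// versus hermitian (HER2K-style) rank-2k update.
//
//   using Kernel = typename cutlass::gemm::kernel::DefaultRank2KComplex<
//       ElementA, LayoutA, ElementB, LayoutB,
//       ElementC, cutlass::layout::RowMajor, cutlass::FillMode::kLower,
//       ElementAccumulator, OperatorClass, ArchTag,
//       ThreadblockShape, WarpShape, InstructionShape,
//       EpilogueOutputOp, ThreadblockSwizzle, Stages,
//       TransformA, TransformB, Operator, SplitKSerial,
//       cutlass::BlasMode::kSymmetric>::Rank2Kkernel;
//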
////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
/// Layout type for A matrix operand
typename LayoutA_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation
ComplexTransform TransformA,
/// Complex elementwise transformation
ComplexTransform TransformB,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_
> struct Rank2KTransposedComplexTransform {
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
};
// partial specializations for HER2K CUBLAS_OP_N layout (ColumnMajor)
template <>
struct Rank2KTransposedComplexTransform <
layout::ColumnMajor, layout::ColumnMajor,
ComplexTransform::kNone, ComplexTransform::kNone,
BlasMode::kHermitian> {
static ComplexTransform const kTransformA = ComplexTransform::kConjugate;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
};
// partial specializations for HER2K CUBLAS_OP_C layout (RowMajor + Complex conjugate)
template <>
struct Rank2KTransposedComplexTransform <
layout::RowMajor, layout::RowMajor,
ComplexTransform::kConjugate, ComplexTransform::kConjugate,
BlasMode::kHermitian> {
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kConjugate;
};
} // namespace detail
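//
// Worked example (editorial addition) of the trait above for the HER2K CUBLAS_OP_N case:
// with both operands ColumnMajor and no user-requested conjugation, the trait resolves to
// conjugating operand A, consistent with the (A x B^H) / (B x A^H) products formed by the
// hermitian specializations below.
//
//   using Xform = detail::Rank2KTransposedComplexTransform<
//       layout::ColumnMajor, layout::ColumnMajor,
//       ComplexTransform::kNone, ComplexTransform::kNone,
//       BlasMode::kHermitian>;
//   static_assert(Xform::kTransformA == ComplexTransform::kConjugate, "per the specialization above");
//   static_assert(Xform::kTransformB == ComplexTransform::kNone, "per the specialization above");
//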
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture complex datatype (symmetric)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRank2KComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, TransformB, Operator, SplitKSerial, BlasMode::kSymmetric> {
static BlasMode const kBlasMode = BlasMode::kSymmetric;
/// Define the threadblock-scoped matrix multiply-accumulate (A x B^T)
using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementB, typename layout::LayoutTranspose<LayoutB>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the threadblock-scoped matrix multiply-accumulate (B x A^T)
using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementB, LayoutB,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level Rank2K operator.
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture complex datatype (hermitian)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRank2KComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, TransformB, Operator, SplitKSerial, BlasMode::kHermitian> {
static BlasMode const kBlasMode = BlasMode::kHermitian;
// Complex transform for input A and B matrices (function on input layout)
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using TransposedComplexTransform = detail::Rank2KTransposedComplexTransform<
LayoutA, LayoutB,
TransformA, TransformB,
kBlasMode>;
// Complex transform on operandA and operandB (function of blas3 computation)
static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA;
static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB;
/// Define the threadblock-scoped matrix multiply-accumulate (A x B^H)
using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementB, typename layout::LayoutTranspose<LayoutB>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
/// Define the threadblock-scoped matrix multiply-accumulate (B x A^H)
using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementB, LayoutB,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level Rank2K operator.
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture complex datatype (symmetric)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRank2KComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, TransformB, Operator, SplitKSerial, BlasMode::kSymmetric> {
static BlasMode const kBlasMode = BlasMode::kSymmetric;
/// Define the threadblock-scoped matrix multiply-accumulate (A x B^T)
using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementB, typename layout::LayoutTranspose<LayoutB>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the threadblock-scoped matrix multiply-accumulate (B x A^T)
using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementB, LayoutB,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level Rank2K operator.
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode>;
};
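//
// Illustrative instantiation of the Sm80 symmetric (SYR2K-style) path above (editorial
// addition). The complex<double> element types, tile shapes, and stage count are
// assumptions for illustration and require "cutlass/complex.h".
//
//   using Syr2kKernel = typename cutlass::gemm::kernel::DefaultRank2KComplex<
//       cutlass::complex<double>, cutlass::layout::ColumnMajor,
//       cutlass::complex<double>, cutlass::layout::ColumnMajor,
//       cutlass::complex<double>, cutlass::layout::RowMajor, cutlass::FillMode::kLower,
//       cutlass::complex<double>, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<64, 64, 16>,
//       cutlass::gemm::GemmShape<32, 32, 16>,
//       cutlass::gemm::GemmShape<8, 8, 4>,
//       cutlass::epilogue::thread::LinearCombination<
//           cutlass::complex<double>, 1, cutlass::complex<double>, cutlass::complex<double>>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3, cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone,
//       cutlass::arch::OpMultiplyAddComplex, false,
//       cutlass::BlasMode::kSymmetric>::Rank2Kkernel;
//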
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture complex datatype (hermitian)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultRank2KComplex<
ElementA, LayoutA, ElementB, LayoutB, ElementC,
layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
TransformA, TransformB, Operator, SplitKSerial, BlasMode::kHermitian> {
static BlasMode const kBlasMode = BlasMode::kHermitian;
// Complex transform for input A and B matrices (function on input layout)
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
using TransposedComplexTransform = detail::Rank2KTransposedComplexTransform<
LayoutA, LayoutB,
TransformA, TransformB,
kBlasMode>;
// Complex transform on operandA and operandB (function of blas3 computation)
static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA;
static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB;
/// Define the threadblock-scoped matrix multiply-accumulate (A x B^H)
using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementA, LayoutA,
ElementB, typename layout::LayoutTranspose<LayoutB>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
/// Define the threadblock-scoped matrix multiply-accumulate (B x A^H)
using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
ElementB, LayoutB,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
/// Define the kernel-level Rank2K operator.
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 20,509 | C | 40.102204 | 111 | 0.706909 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/grouped_problem_visitor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base scheduler for grouped problems
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing the type of scheduling to perform for the ProblemVisitor
enum class GroupScheduleMode {
// Perform all scheduling on device
kDeviceOnly,
// Precompute on the host the full sequence of problems to access
kHostPrecompute
};
/// Visitor class to abstract away the algorithm for iterating over tiles
template <typename ProblemSizeHelper,
typename ThreadblockShape_>
struct BaseGroupedProblemVisitor {
using ThreadblockShape = ThreadblockShape_;
struct ProblemInfo {
static int32_t const kNoPrefetchEntry = -1;
int32_t problem_idx;
int32_t problem_start;
CUTLASS_DEVICE
ProblemInfo() : problem_idx(kNoPrefetchEntry), problem_start(kNoPrefetchEntry) {}
CUTLASS_DEVICE
ProblemInfo(int32_t problem_idx_, int32_t problem_start_) :
problem_idx(problem_idx_), problem_start(problem_start_) {}
};
struct Params {
cutlass::gemm::GemmCoord const *problem_sizes;
int32_t problem_count;
void const *workspace;
int32_t tile_count;
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
Params(): problem_sizes(nullptr), problem_count(0), workspace(nullptr), tile_count(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const *problem_sizes,
int32_t problem_count,
void const *workspace = nullptr,
int32_t tile_count = 0
):
problem_sizes(problem_sizes),
problem_count(problem_count),
workspace(workspace),
tile_count(tile_count)
{}
};
Params const ¶ms;
int32_t tile_idx;
int32_t problem_tile_start;
int32_t problem_idx;
//
// Methods
//
CUTLASS_DEVICE
BaseGroupedProblemVisitor(
Params const ¶ms_,
int32_t block_idx
):
params(params_),
tile_idx(block_idx),
problem_tile_start(0),
problem_idx(0)
{}
/// Get the grid shape
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return ProblemSizeHelper::grid_shape(problem);
}
/// Gets the global tile index
CUTLASS_HOST_DEVICE
int32_t tile_index() const {
return tile_idx;
}
/// Gets the index of the problem
CUTLASS_HOST_DEVICE
int32_t problem_index() const {
return problem_idx;
}
CUTLASS_HOST_DEVICE
int32_t threadblock_idx() const {
return tile_idx - problem_tile_start;
}
CUTLASS_DEVICE
void advance(int32_t grid_size) {
tile_idx += grid_size;
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {
ProblemSizeHelper::possibly_transpose_problem(problem);
}
/// Returns the problem size for the current problem
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord problem_size() const {
GemmCoord problem = params.problem_sizes[problem_idx];
ProblemSizeHelper::possibly_transpose_problem(problem);
return problem;
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
return ProblemSizeHelper::tile_count(grid);
}
static int32_t group_tile_count(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count) {
int32_t total_tiles = 0;
for (int32_t i = 0; i < problem_count; ++i) {
auto problem = host_problem_sizes_ptr[i];
possibly_transpose_problem(problem);
auto grid = grid_shape(problem);
total_tiles += tile_count(grid);
}
return total_tiles;
}
};
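// Illustrative example (not part of the library): for a GEMM-style ProblemSizeHelper that tiles
// only the M and N dimensions, a ThreadblockShape of 128x128 and a problem of size 256x512x64
// would yield grid_shape() == (2, 4, 1) and tile_count() == 8; group_tile_count() above simply
// sums this per-problem tile count over every problem in the group.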
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ProblemSizeHelper,
typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount
>
struct GroupedProblemVisitor;
/////////////////////////////////////////////////////////////////////////////////////////////////
// ProblemVisitor that performs all scheduling on device
//
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode::kDeviceOnly,
PrefetchTileCount,
ThreadCount>: public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
static int const kThreadCount = ThreadCount;
static bool const kRequiresPrecomputation = false;
static int const kThreadsPerWarp = 32;
struct SharedStorage {};
// Final tile of the problem loaded by this thread. Each thread will hold
// a separate value.
int32_t problem_ending_tile;
SharedStorage &shared_storage;
//
// Methods
//
CUTLASS_DEVICE
GroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
problem_ending_tile(0),
shared_storage(shared_storage_)
{
this->problem_idx = -1 * kThreadsPerWarp;
this->problem_tile_start = 0;
}
CUTLASS_DEVICE
bool next_tile() {
// Check whether the tile to compute is within the range of the current problem.
int32_t problem_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, this->problem_idx % kThreadsPerWarp);
if (this->tile_idx < problem_tile_end) {
return true;
}
// Check whether the tile to compute is within the current group of problems fetched by the warp.
// The last tile for this group is the final tile of the problem held by the final thread in the warp.
int32_t group_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1);
// Keep the starting problem for this group in `problem_idx`. This is done to reduce
// register pressure. The starting problem for this group is simply the first problem
// in the group most recently fetched by the warp.
int32_t &group_problem_start = this->problem_idx;
group_problem_start = (this->problem_idx / kThreadsPerWarp) * kThreadsPerWarp;
// Keep the starting tile for this group in `problem_tile_start`. This is done to reduce
// register pressure.
int32_t &group_tile_start = this->problem_tile_start;
// Each thread in the warp tracks the ending tile of one problem in the group. The warp
// advances a full group of problems at a time until it reaches a group whose last ending
// tile is greater than tile_idx.
while (group_tile_end <= this->tile_idx) {
group_problem_start += kThreadsPerWarp;
if (group_problem_start > this->params.problem_count) {
return false;
}
// Since `group_tile_start` is a reference to `this->problem_tile_start`, this
// also sets `this->problem_tile_start`. The fact that `this->problem_tile_start`
// is also set here is used later in `next_tile`.
group_tile_start = group_tile_end;
int lane_idx = threadIdx.x % kThreadsPerWarp;
int32_t lane_problem = group_problem_start + lane_idx;
// Compute the number of tiles in the problem assigned to each thread.
problem_ending_tile = 0;
if (lane_problem < this->params.problem_count) {
cutlass::gemm::GemmCoord problem = this->params.problem_sizes[lane_problem];
this->possibly_transpose_problem(problem);
cutlass::gemm::GemmCoord grid = this->grid_shape(problem);
problem_ending_tile = this->tile_count(grid);
}
// Perform a warp-wide inclusive prefix sum to obtain the ending tile index of
// each thread's problem.
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kThreadsPerWarp; i <<= 1) {
int32_t val = __shfl_up_sync(0xffffffff, problem_ending_tile, i);
if (lane_idx >= i) {
problem_ending_tile += val;
}
}
// The total tile count for this group is now in the final position of the prefix sum
int32_t tiles_in_group = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1);
problem_ending_tile += group_tile_start;
group_tile_end += tiles_in_group;
}
// The next problem to process is the first one in the group whose ending tile
// is greater than the tile index.
int32_t problem_idx_in_group =
__popc(__ballot_sync(0xffffffff, problem_ending_tile <= this->tile_idx));
this->problem_idx = group_problem_start + problem_idx_in_group;
// The starting tile for this problem is the ending tile of the previous problem. In cases
// where `problem_idx_in_group` is the first problem in the group, we do not need to reset
// `problem_tile_start`, because it is set to the previous group's ending tile in the while
// loop above.
if (problem_idx_in_group > 0) {
this->problem_tile_start = __shfl_sync(0xffffffff, problem_ending_tile, problem_idx_in_group - 1);
}
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
return 0;
}
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {}
};
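// The scheduling loop above relies on a warp-wide inclusive prefix sum over per-lane tile counts.
// A standalone sketch of that pattern (illustrative only; `value` and `lane_idx` are placeholders):
//
//   __device__ int32_t warp_inclusive_prefix_sum(int32_t value, int lane_idx) {
//     for (int i = 1; i < 32; i <<= 1) {
//       int32_t neighbor = __shfl_up_sync(0xffffffff, value, i);
//       if (lane_idx >= i) {
//         value += neighbor;
//       }
//     }
//     return value;   // lane k now holds the sum of the values from lanes 0..k
//   }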
/////////////////////////////////////////////////////////////////////////////////////////////////
// Precomputes schedule on host and prefetches into shared memory
//
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode::kHostPrecompute,
PrefetchTileCount,
ThreadCount> : public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
static_assert(PrefetchTileCount > 0,
"GroupedProblemVisitor with GroupScheduleMode `kHostPrecompute` currently requires prefetching to shared memory");
using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
using ProblemInfo = typename Base::ProblemInfo;
static bool const kRequiresPrecomputation = true;
static int const kPrefetchTileCount = PrefetchTileCount;
static int const kThreadCount = ThreadCount;
struct SharedStorage {
// Sequence of problem IDs and starting tiles to compute
cutlass::Array<ProblemInfo, kPrefetchTileCount> prefetched_problems;
};
int32_t tiles_computed;
int32_t iterations_per_block;
int32_t block_load_start;
SharedStorage &shared_storage;
ProblemInfo const *problem_info_ptr;
//
// Methods
//
CUTLASS_DEVICE
GroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
tiles_computed(0),
shared_storage(shared_storage_),
problem_info_ptr(reinterpret_cast<ProblemInfo const*>(params_.workspace))
{
iterations_per_block = (params_.tile_count - 1 + gridDim.x) / gridDim.x;
block_load_start = iterations_per_block * block_idx;
// Start prefetching the first set of tiles to compute
prefetch_tiles();
}
CUTLASS_DEVICE
bool next_tile() {
if (this->tile_idx >= this->params.tile_count) {
return false;
}
int32_t prefetch_idx = (tiles_computed % kPrefetchTileCount);
if (prefetch_idx == 0) {
// Ensure all previous stores to shared memory have been completed
__syncthreads();
}
auto problem_info = shared_storage.prefetched_problems[prefetch_idx];
++tiles_computed;
if ((tiles_computed % kPrefetchTileCount) == 0) {
// Begin prefetching next set of tiles. Synchronize first to ensure that
// we don't overwrite the current buffer while someone else is using it.
__syncthreads();
prefetch_tiles();
}
this->problem_idx = problem_info.problem_idx;
this->problem_tile_start = problem_info.problem_start;
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count);
int32_t entries_per_block = ((total_tiles - 1 + block_count) / block_count);
return sizeof(ProblemInfo) * entries_per_block * block_count;
}
#if !defined(__CUDACC_RTC__)
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {
ProblemInfo* host_problem_info_ptr = reinterpret_cast<ProblemInfo*>(host_workspace_ptr);
int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count);
int32_t entries_per_block = (total_tiles - 1 + block_count) / block_count;
int tile = 0;
int start_tile = 0;
for (int p_idx = 0; p_idx < problem_count; ++p_idx) {
auto problem = host_problem_sizes_ptr[p_idx];
Base::possibly_transpose_problem(problem);
auto grid = Base::grid_shape(problem);
int tiles = Base::tile_count(grid);
ProblemInfo problem_info(p_idx, start_tile);
for (int i = 0; i < tiles; ++i, ++tile) {
host_problem_info_ptr[(entries_per_block * (tile % block_count)) + (tile / block_count)] = problem_info;
}
start_tile += tiles;
}
}
#endif
private:
CUTLASS_DEVICE
void prefetch_tiles() {
// TODO: Consider changing to use async copies from global to shared mem
CUTLASS_PRAGMA_UNROLL
for (int32_t i = 0; i < kPrefetchTileCount; i += kThreadCount) {
int32_t offset = threadIdx.x + i;
if (offset < kPrefetchTileCount && (tiles_computed + offset < iterations_per_block)) {
shared_storage.prefetched_problems[offset] = problem_info_ptr[block_load_start + tiles_computed + offset];
}
}
}
};
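// Illustrative example (not part of the library): with block_count = 4 and 10 total tiles,
// entries_per_block = 3 and host_precompute() deals tiles round-robin, writing tile t to slot
// (t / 4) of block (t % 4). Block 0 is therefore assigned tiles {0, 4, 8}, block 1 tiles
// {1, 5, 9}, and so on, matching the device side where block b starts at tile b and advances
// its tile index by gridDim.x.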
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 16,849 | C | 35.236559 | 130 | 0.634756 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_with_k_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel computing a GEMM fused with a reduction of operand A or B along the GEMM K dimension.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename EpilogueGemmKReduction_, ///! Epilogue for the GEMM-K reduction
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithKReduction {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using EpilogueGemmKReduction = EpilogueGemmKReduction_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using LayoutGemmKReduction = cutlass::layout::PitchLinear;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
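// For example, with 16-bit A and B operands this evaluates to 8 elements (one 128-bit access).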
static int const kReduceKForA = Mma::kReduceKForA;
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
void * ptr_gemm_k_reduction;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_gemm_k_reduction;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction;
//
// Methods
//
Arguments() :
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
ptr_gemm_k_reduction(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void * ptr_gemm_k_reduction,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_gemm_k_reduction,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd,
typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_gemm_k_reduction(ptr_gemm_k_reduction),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_gemm_k_reduction(batch_stride_gemm_k_reduction),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ld_gemm_k_reduction(ld_gemm_k_reduction)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
void * ptr_gemm_k_reduction;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_gemm_k_reduction;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda),
params_B(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_gemm_k_reduction(args.batch_stride_gemm_k_reduction),
ptr_D(args.ptr_D),
ptr_gemm_k_reduction(args.ptr_gemm_k_reduction)
{}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
CUTLASS_TRACE_HOST("GemmUniversal::Params::Params() - problem_size: " << this->problem_size);
if (this->mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D = workspace;
ptr_gemm_k_reduction = static_cast<uint8_t *>(workspace)
+ sizeof(ElementC) * size_t(this->batch_stride_D) * size_t(this->grid_tiled_shape.k());
return Status::kSuccess;
}
return ParamsBase::init_workspace(workspace, stream);
}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = ParamsBase::get_workspace_size();
if (this->mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Split-K parallel always requires a temporary workspace
workspace_bytes +=
sizeof(ElementC) *
size_t(batch_stride_gemm_k_reduction) *
size_t(this->grid_tiled_shape.k());
}
return workspace_bytes;
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
ptr_gemm_k_reduction = args.ptr_gemm_k_reduction;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand A");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand B");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand C");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithKReduction op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
typename Mma::FragmentReduction gemm_k_accumulators;
gemm_k_accumulators.clear();
// Compute the number of mainloop iterations covering this threadblock's K range (rounded up)
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators,
gemm_k_accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
ElementC *ptr_gemm_k_reduction = static_cast<ElementC *>(params.ptr_gemm_k_reduction);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
ptr_gemm_k_reduction += threadblock_tile_offset.k() * params.batch_stride_gemm_k_reduction;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
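// Only threadblocks in the first tile column (when reducing along K for operand A) or in the
// first tile row (when reducing for operand B) write the GEMM-K reduction output, and only the
// matching column or row of warps within those threadblocks participates.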
if ((kReduceKForA && threadblock_tile_offset.n() == 0)
|| (!kReduceKForA && threadblock_tile_offset.m() == 0)) {
int warp_idx_mn = warp_idx % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Mma::Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Mma::Base::WarpCount::kM;
if ((kReduceKForA && warp_idx_n == 0)
|| (!kReduceKForA && warp_idx_m == 0)) {
int reduction_warp_idx = kReduceKForA ? warp_idx_m : warp_idx_n;
int reduction_threadblock_offset = kReduceKForA ? threadblock_tile_offset.m() :
threadblock_tile_offset.n();
int reduction_vector_size = kReduceKForA ? params.problem_size.m()
: params.problem_size.n();
EpilogueGemmKReduction epilogue_gemm_k_reduction(thread_idx,
reduction_warp_idx,
lane_idx,
reduction_threadblock_offset,
ptr_gemm_k_reduction);
epilogue_gemm_k_reduction(
reduction_vector_size,
gemm_k_accumulators,
params.mode == GemmUniversalMode::kGemm
&& (params.grid_tiled_shape.k() > 1)
&& (threadblock_tile_offset.k() > 0));
}
}
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
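// Serial split-K synchronization (summary of the flow above): each threadblock in a K-partitioned
// column acquires the per-output-tile semaphore at its partition index, accumulates onto the
// partial result already stored in D by the previous partition, and then releases the semaphore
// with the next partition index, or resets it to zero if it is the final partition.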
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 23,629 | C | 32.951149 | 163 | 0.615515 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_grouped.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Problem visitor for grouped GEMMs
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
bool Transposed = false
>
struct GemmGrouped {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static bool const kTransposed = Transposed;
// Optional transpose
using MapArguments = kernel::detail::MapArguments<
typename Mma::IteratorA::Element,
typename Mma::IteratorA::Layout,
Mma::kTransformA,
Mma::IteratorA::AccessType::kElements,
typename Mma::IteratorB::Element,
typename Mma::IteratorB::Layout,
Mma::kTransformB,
Mma::IteratorB::AccessType::kElements,
typename Mma::LayoutC,
kTransposed
>;
// Public-facing type definitions related to operand element type, layout, and complex conjugate
// operation. Must interact with the 'kTransposed' notion.
using ElementA = typename MapArguments::ElementA;
using LayoutA = typename MapArguments::LayoutA;
using ElementB = typename MapArguments::ElementB;
using LayoutB = typename MapArguments::LayoutB;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename MapArguments::LayoutC;
static ComplexTransform const kTransformA = MapArguments::kTransformA;
static ComplexTransform const kTransformB = MapArguments::kTransformB;
// Type definitions about the mainloop.
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = MapArguments::kAlignmentA;
static int const kAlignmentB = MapArguments::kAlignmentB;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ProblemVisitor = GemmGroupedProblemVisitor<
ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount,
kTransposed>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord *problem_sizes;
int problem_count;
int threadblock_count;
typename EpilogueOutputOp::Params output_op;
ElementA ** ptr_A;
ElementB ** ptr_B;
ElementC ** ptr_C;
ElementC ** ptr_D;
typename LayoutA::Stride::LongIndex *lda;
typename LayoutB::Stride::LongIndex *ldb;
typename LayoutC::Stride::LongIndex *ldc;
typename LayoutC::Stride::LongIndex *ldd;
// Only used by device-level operator
GemmCoord *host_problem_sizes;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments():
problem_count(0),
threadblock_count(0),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
lda(nullptr),
ldb(nullptr),
ldc(nullptr),
ldd(nullptr),
host_problem_sizes(nullptr)
{
}
/// Ctor
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord *problem_sizes,
int problem_count,
int threadblock_count,
typename EpilogueOutputOp::Params output_op,
ElementA ** ptr_A,
ElementB ** ptr_B,
ElementC ** ptr_C,
ElementC ** ptr_D,
typename LayoutA::Stride::LongIndex *lda,
typename LayoutB::Stride::LongIndex *ldb,
typename LayoutC::Stride::LongIndex *ldc,
typename LayoutC::Stride::LongIndex *ldd,
GemmCoord *host_problem_sizes=nullptr
):
problem_sizes(problem_sizes),
problem_count(problem_count),
threadblock_count(threadblock_count),
output_op(output_op),
ptr_A(ptr_A),
ptr_B(ptr_B),
ptr_C(ptr_C),
ptr_D(ptr_D),
lda(lda),
ldb(ldb),
ldc(ldc),
ldd(ldd),
host_problem_sizes(host_problem_sizes)
{
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
typename ProblemVisitor::Params problem_visitor;
int threadblock_count;
typename EpilogueOutputOp::Params output_op;
ElementA ** ptr_A;
ElementB ** ptr_B;
ElementC ** ptr_C;
ElementC ** ptr_D;
typename LayoutA::Stride::LongIndex *lda;
typename LayoutB::Stride::LongIndex *ldb;
typename LayoutC::Stride::LongIndex *ldc;
typename LayoutC::Stride::LongIndex *ldd;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
lda(nullptr),
ldb(nullptr),
ldc(nullptr),
ldd(nullptr)
{ }
CUTLASS_HOST_DEVICE
Params(Arguments const &args,
void *workspace = nullptr,
int tile_count = 0):
problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
output_op(args.output_op),
ptr_A(args.ptr_A),
ptr_B(args.ptr_B),
ptr_C(args.ptr_C),
ptr_D(args.ptr_D),
lda(args.lda),
ldb(args.ldb),
ldc(args.ldc),
ldd(args.ldd)
{
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr,
int tile_count = 0) {
problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count,
workspace, tile_count);
threadblock_count = args.threadblock_count;
output_op = args.output_op;
ptr_A = args.ptr_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
lda = args.lda;
ldb = args.ldb;
ldc = args.ldc;
ldd = args.ldd;
}
};
/// Shared memory storage structure
struct SharedStorage {
union {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
} kernel;
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
public:
//
// Methods
//
CUTLASS_DEVICE
GemmGrouped() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
//
// These types shadow the type-level definitions and support the ability to implement
// a 'transposed' GEMM that computes the transposed problems.
//
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
//
// Problem visitor.
//
ProblemVisitor problem_visitor(
params.problem_visitor,
shared_storage.problem_visitor,
blockIdx.x);
// Outer 'persistent' loop to iterate over tiles
while (problem_visitor.next_tile()) {
GemmCoord problem_size = problem_visitor.problem_size();
int32_t problem_idx = problem_visitor.problem_index();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
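// Decompose the linear threadblock index within this problem into a (tile row, tile column)
// coordinate using a row-major rasterization over the problem's N-dimension tile count.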
cutlass::gemm::GemmCoord threadblock_offset(
int(threadblock_idx / grid_shape.n()) * Mma::Shape::kM,
int(threadblock_idx % grid_shape.n()) * Mma::Shape::kN,
0);
// Load element pointers. Exchange pointers and strides if working on the transpose
ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx]));
typename LayoutA::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]);
ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx]));
typename LayoutB::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]);
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_offset.m(),
0,
};
cutlass::MatrixCoord tb_offset_B{
0,
threadblock_offset.n()
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
LayoutA(ldm_A),
ptr_A,
{problem_size.m(), problem_size.k()},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
LayoutB(ldm_B),
ptr_B,
{problem_size.k(), problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Matrix multiply phase
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Wait for all threads to finish their epilogue phases from the previous tile.
__syncthreads();
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
ElementC *ptr_C = params.ptr_C[problem_idx];
ElementC *ptr_D = params.ptr_D[problem_idx];
LayoutC layout_C(params.ldc[problem_idx]);
LayoutC layout_D(params.ldd[problem_idx]);
typename Epilogue::OutputTileIterator::Params params_C(layout_C);
typename Epilogue::OutputTileIterator::Params params_D(layout_D);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params_C,
ptr_C,
problem_size.mn(),
thread_idx,
threadblock_offset.mn()
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params_D,
ptr_D,
problem_size.mn(),
thread_idx,
threadblock_offset.mn()
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
// Next tile
problem_visitor.advance(gridDim.x);
}
}
};
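// Illustrative host-side sketch (not part of the library; `launch_kernel` is a placeholder for
// the actual launch wrapper, and all pointer/leading-dimension arrays are assumed to be valid
// device-memory arrays). Given a concrete instantiation `GemmKernel` of this template:
//
//   GemmKernel::Arguments args(
//       problem_sizes, problem_count, threadblock_count, epilogue_params,
//       ptr_A, ptr_B, ptr_C, ptr_D, lda, ldb, ldc, ldd);
//   GemmKernel::Params params(args, workspace, tile_count);
//   launch_kernel<<<threadblock_count, GemmKernel::kThreadCount,
//                   sizeof(GemmKernel::SharedStorage)>>>(params);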
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 14,711 | C | 29.522822 | 124 | 0.629053 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex_array.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel computing batches of planar-complex GEMMs with per-batch pointers and problem sizes read from device memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmPlanarComplexArray {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using Operator = typename Mma::Operator;
using ArchTag = typename Mma::ArchTag;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value);
//
// Additional types needed for reflection
//
using ElementAccumulator = typename Mma::Policy::Operator::ElementC;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::Shape;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
//
// Arguments structure
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
int const *ptr_M;
int const *ptr_N;
int const *ptr_K;
void const * const * ptr_A_real;
void const * const * ptr_A_imag;
void const * const * ptr_B_real;
void const * const * ptr_B_imag;
void const * const * ptr_C_real;
void const * const * ptr_C_imag;
void * const * ptr_D_real;
void * const * ptr_D_imag;
typename LayoutA::Stride::Index lda_real;
typename LayoutA::Stride::Index lda_imag;
typename LayoutB::Stride::Index ldb_real;
typename LayoutB::Stride::Index ldb_imag;
typename LayoutC::Stride::Index ldc_real;
typename LayoutC::Stride::Index ldc_imag;
typename LayoutC::Stride::Index ldd_real;
typename LayoutC::Stride::Index ldd_imag;
//
// Methods
//
Arguments():
ptr_M(nullptr),
ptr_N(nullptr),
ptr_K(nullptr),
ptr_A_real(nullptr),
ptr_A_imag(nullptr),
ptr_B_real(nullptr),
ptr_B_imag(nullptr),
ptr_C_real(nullptr),
ptr_C_imag(nullptr),
ptr_D_real(nullptr),
ptr_D_imag(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
int const *ptr_M,
int const *ptr_N,
int const *ptr_K,
void const * const * ptr_A_real,
void const * const * ptr_A_imag,
void const * const * ptr_B_real,
void const * const * ptr_B_imag,
void const * const * ptr_C_real,
void const * const * ptr_C_imag,
void * const * ptr_D_real,
void * const * ptr_D_imag,
typename LayoutA::Stride::Index lda_real,
typename LayoutA::Stride::Index lda_imag,
typename LayoutB::Stride::Index ldb_real,
typename LayoutB::Stride::Index ldb_imag,
typename LayoutC::Stride::Index ldc_real,
typename LayoutC::Stride::Index ldc_imag,
typename LayoutC::Stride::Index ldd_real,
typename LayoutC::Stride::Index ldd_imag)
:
UniversalArgumentsBase(mode, problem_size, batch_count, /*batch_stride_D=*/ 0), // batch stride of D is not used by the array kernel (assumed zero)
epilogue(epilogue),
ptr_M(ptr_M),
ptr_N(ptr_N),
ptr_K(ptr_K),
ptr_A_real(ptr_A_real),
ptr_A_imag(ptr_A_imag),
ptr_B_real(ptr_B_real),
ptr_B_imag(ptr_B_imag),
ptr_C_real(ptr_C_real),
ptr_C_imag(ptr_C_imag),
ptr_D_real(ptr_D_real),
ptr_D_imag(ptr_D_imag),
lda_real(lda_real),
lda_imag(lda_imag),
ldb_real(ldb_real),
ldb_imag(ldb_imag),
ldc_real(ldc_real),
ldc_imag(ldc_imag),
ldd_real(ldd_real),
ldd_imag(ldd_imag)
{}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_M, args.ptr_N);
std::swap(args.ptr_A_real, args.ptr_B_real);
std::swap(args.ptr_A_imag, args.ptr_B_imag);
std::swap(args.lda_real, args.ldb_real);
std::swap(args.lda_imag, args.ldb_imag);
return args;
}
};
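// Note: each planar-complex operand is stored as two real-valued planes (one for the real part,
// one for the imaginary part), each with its own per-batch pointer array and leading dimension;
// the batch index selects the corresponding entry from every pointer array.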
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>;
//
// Data members
//
typename Mma::IteratorA::Params params_A_real;
typename Mma::IteratorA::Params params_A_imag;
typename Mma::IteratorB::Params params_B_real;
typename Mma::IteratorB::Params params_B_imag;
typename Epilogue::OutputTileIterator::Params params_C_real;
typename Epilogue::OutputTileIterator::Params params_C_imag;
typename Epilogue::OutputTileIterator::Params params_D_real;
typename Epilogue::OutputTileIterator::Params params_D_imag;
typename EpilogueOutputOp::Params output_op;
int const *ptr_M;
int const *ptr_N;
int const *ptr_K;
void const * const * ptr_A_real;
void const * const * ptr_A_imag;
void const * const * ptr_B_real;
void const * const * ptr_B_imag;
void const * const * ptr_C_real;
void const * const * ptr_C_imag;
void * const * ptr_D_real;
void * const * ptr_D_imag;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
ptr_M(args.ptr_M),
ptr_N(args.ptr_N),
ptr_K(args.ptr_K),
params_A_real(args.lda_real),
params_A_imag(args.lda_imag),
params_B_real(args.ldb_real),
params_B_imag(args.ldb_imag),
params_C_real(args.ldc_real),
params_C_imag(args.ldc_imag),
params_D_real(args.ldd_real),
params_D_imag(args.ldd_imag),
output_op(args.epilogue),
ptr_A_real(args.ptr_A_real),
ptr_A_imag(args.ptr_A_imag),
ptr_B_real(args.ptr_B_real),
ptr_B_imag(args.ptr_B_imag),
ptr_C_real(args.ptr_C_real),
ptr_C_imag(args.ptr_C_imag),
ptr_D_real(args.ptr_D_real),
ptr_D_imag(args.ptr_D_imag)
{}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
void update(Arguments const &args)
{
ptr_M = args.ptr_M;
ptr_N = args.ptr_N;
ptr_K = args.ptr_K;
ptr_A_real = args.ptr_A_real;
ptr_A_imag = args.ptr_A_imag;
ptr_B_real = args.ptr_B_real;
ptr_B_imag = args.ptr_B_imag;
ptr_C_real = args.ptr_C_real;
ptr_C_imag = args.ptr_C_imag;
ptr_D_real = args.ptr_D_real;
ptr_D_imag = args.ptr_D_imag;
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(Arguments const &args) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
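    // Illustrative example (assumed element type): a RowMajor A of half_t accessed
    // through 128-bit vectors gives kAlignmentA == 8, so the checks below require
    // problem_size.k() % 8 == 0; a ColumnMajor A constrains problem_size.m() instead.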
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = args.problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = args.problem_size.m() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = args.problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = args.problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = args.problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = args.problem_size.m() % kAlignmentC;
}
if (isAMisaligned || isBMisaligned || isCMisaligned) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmPlanarComplexArray op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int batch_idx = threadblock_tile_offset.k();
int problem_size_m = params.problem_size.m();
int problem_size_n = params.problem_size.n();
int problem_size_k = params.problem_size.k();
ElementA *ptr_A_real = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_real[batch_idx]));
ElementA *ptr_A_imag = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_imag[batch_idx]));
ElementB *ptr_B_real = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_real[batch_idx]));
ElementB *ptr_B_imag = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_imag[batch_idx]));
//
// If pointers for problem sizes are specified, these are loaded from global memory
//
if (params.ptr_M) {
problem_size_m = params.ptr_M[batch_idx];
}
if (params.ptr_N) {
problem_size_n = params.ptr_N[batch_idx];
}
if (params.ptr_K) {
problem_size_k = params.ptr_K[batch_idx];
}
int const kBlockCountM = (problem_size_m + Mma::Shape::kM - 1) / Mma::Shape::kM;
int const kBlockCountN = (problem_size_n + Mma::Shape::kN - 1) / Mma::Shape::kN;
int const kGemmKIterations = (problem_size_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
    //
    // Each threadblock loops over the logical problem size, which may only be known
    // after the grid is launched (the per-batch extents may have been loaded from
    // global memory above).
    //
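    //
    // Illustrative example (assumed values): with Mma::Shape::kM == Mma::Shape::kN == 128
    // and a per-batch problem of 300x300 read through ptr_M/ptr_N, kBlockCountM ==
    // kBlockCountN == 3, so a grid launched with grid_tiled_shape 2x2 still covers the
    // 3x3 block grid via these grid-stride loops.
    //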
CUTLASS_PRAGMA_NO_UNROLL
for (int block_m = threadblock_tile_offset.m();
block_m < kBlockCountM;
block_m += params.grid_tiled_shape.m()) {
CUTLASS_PRAGMA_NO_UNROLL
for (int block_n = threadblock_tile_offset.n();
block_n < kBlockCountN;
block_n += params.grid_tiled_shape.n()) {
//
// Compute indices within threadblock and warp.
//
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Proceed with regular GEMM logic.
//
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{ block_m * Mma::Shape::kM, 0};
cutlass::MatrixCoord tb_offset_B{ 0, block_n * Mma::Shape::kN };
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A_real(
params.params_A_real,
ptr_A_real,
{problem_size_m, problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorA iterator_A_imag(
params.params_A_imag,
ptr_A_imag,
{problem_size_m, problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B_real(
params.params_B_real,
ptr_B_real,
{problem_size_k, problem_size_n},
thread_idx,
tb_offset_B);
typename Mma::IteratorB iterator_B_imag(
params.params_B_imag,
ptr_B_imag,
{problem_size_k, problem_size_n},
thread_idx,
tb_offset_B);
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(
kGemmKIterations,
accumulators,
iterator_A_real,
iterator_A_imag,
iterator_B_real,
iterator_B_imag,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
//assume identity swizzle
MatrixCoord threadblock_offset(
block_m * Mma::Shape::kM,
block_n * Mma::Shape::kN
);
ElementC *ptr_C_real = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_real[batch_idx]));
ElementC *ptr_C_imag = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_imag[batch_idx]));
ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real[batch_idx]);
ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag[batch_idx]);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C_real(
params.params_C_real,
ptr_C_real,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C_imag(
params.params_C_imag,
ptr_C_imag,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D_real(
params.params_D_real,
ptr_D_real,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_D_imag(
params.params_D_imag,
ptr_D_imag,
{problem_size_m, problem_size_n},
thread_idx,
threadblock_offset
);
//
// Construct epilogue
//
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D_real,
iterator_D_imag,
accumulators,
iterator_C_real,
iterator_C_imag);
} // for block_n
} // for block_m
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 18,961 | C | 29.633279 | 105 | 0.614472 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Problem visitor for grouped Rank2K operations.
This problem visitor is specialized for Rank2K operations, for which matrix C is upper/lower
triangular. Using a problem visitor designed for GEMMs for Rank2K problems is inefficient
because threadblocks will be frequently assigned to tiles that exit early (e.g., due to
being assigned to a tile in the upper-triangular portion of a lower-triangular problem).
This can lead to load imbalance among threadblocks, as the GEMM-based scheduler
assigns all threadblocks to nearly the same number of tiles, regardless of whether
those tiles exit early.
Consider an example of a group of four Rank2Ks with matrix C consisting of a grid of 2x2 tiles.
Consider a grid of 8 threadblocks. The default GEMM scheduler will assign threadblocks to
tiles in the following order:
Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3
0 1 4 5 0 1 4 5
2 3 6 7 2 3 6 7
Assuming that the problems are lower triangular, blocks 1 and 5 are continuously assigned
to inactive tiles.
This problem visitor aims to assign threadblocks to only those tiles which are in the
upper/lower triangular portion of a given problem. Using the example above, the resulting
assignment would be:
Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3
0 - 3 - 6 - 1 -
1 2 4 5 7 0 2 3
Achieving the schedule above requires a mapping from threadblock ID to tile coordinates (i, j).
We will illustrate this by mapping on a lower-triangular matrix with a 3x3 grid. We first
calculate row and column indices assuming one-indexed rows, tiles, and threadblock IDs, and
then subtract one to convert to zero-indexed.
Col 1 Col 2 Col 3
----------------------
Row 1 | 1 - -
Row 2 | 2 3 -
Row 3 | 4 5 6
We next outline this mapping, borrowing from: https://stackoverflow.com/a/40954159
Calculating row i given threadblock ID t
----------------------------------------
For a given row i, all threadblock IDs t in that row satisfy the following:
t <= 1 + 2 + 3 + ... + (i-1) + i
The closed-form equation for the right-hand side is: i(i+1)/2.
Using this, we can solve for i given t:
t <= i(i+1)/2
2t <= i^2 + i
2t <= i^2 + i + 0.25 - 0.25
2t + 0.25 <= i^2 + i + 0.25
2t + 0.25 <= (i + 0.5)^2
sqrt(2t + 0.25) - 0.5 <= i
To account for fractional values, we set:
i = ceil(sqrt(2t + 0.25) - 0.5)
To turn this into a zero-indexed row and work with zero-indexed t, we perform:
i = ceil(sqrt(2(t+1) + 0.25) - 0.5) - 1
= ceil(sqrt(2t + 2.25) - 0.5) - 1
Calculating column j given threadblock ID t and row i
-----------------------------------------------------
For a given row i, all threadblock IDs t in that row also satisfy the following:
t > 1 + 2 + 3 + ... + (i-2) + (i-1)
--> t > i(i-1)/2
Threadblock IDs within a given row are sequential, so the one-indexed column ID
for one-indexed threadblock ID t and row i is:
j = t - (i(i-1)/2)
The zero-indexed version becomes:
j = (t+1) - (i(i+1)/2) -1
= t - (i(i+1)/2)
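    A quick check of these formulas (illustrative) on the 3x3 lower-triangular grid
    above, using zero-indexed t = 4:
      i = ceil(sqrt(2*4 + 2.25) - 0.5) - 1 = ceil(sqrt(10.25) - 0.5) - 1 = 3 - 1 = 2
      j = 4 - (2*3)/2 = 1
    which matches the tile labeled 5 (one-indexed) at Row 3, Col 2 in the figure.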
Accounting for non-square grids
-------------------------------
    Though the overall output problem size for Rank2K problems is guaranteed to be square, the
grids used in computing may not be square due to using non-square threadblock shapes. For
example, a threadblock shape of 64x32 operating on a problem of output size 128x128 would
result in a grid of 2x4 tiles.
This case can be handled by noting that the output resembles a square grid of 2x2 "macro tiles"
each of which contains 2 "true tiles." We can thus first map a threadblock ID to its "macro tile"
using the equations above, and then map it to the "true tile" within its "macro tile." In the example
of a 2x4 grid, this mapping would look as follows:
"Macro grid" "True grid"
{0, 1} - 0 1 - -
{2, 3} {4, 5} 2 3 4 5
A zero-indexed threadblock ID t is mapped to its "macro tile ID" t_macro as:
t_macro = t // r
Where r is the ratio of the maximum dimension of the grid to the minimum dimension of the grid
(i.e., r = 4 / 2 = 2 in the previous example).
One uses t_macro and the calculations above to find the row and column in the square matrix to
obtain i_macro and j_macro (zero-indexed). The mapping from (i_macro, j_macro) --> (i, j)
is simply the following:
if (ThreadblockShape::M > ThreadblockShape::N):
r = ThreadblockShape::M / ThreadblockShape::N
i = i_macro
j = (j_macro * r) + (t % r)
elif (ThreadblockShape::M < ThreadblockShape::N):
r = ThreadblockShape::N / ThreadblockShape::M
i = (i_macro * r) + (t % r)
j = j_macro
else:
i = i_macro
j = j_macro
    Handling cases with grid dimensions that aren't multiples of each other
----------------------------------------------------------------------
Even though threadblock shapes M and N are typically multiples of one another, the grid
for a given problem may not have dimensions of the same ratio as that of the threadblock.
For example, a problem of size 132x132 using a threadblock of shape 64x32 will result
in a grid of 3x5 tiles. In this case, there is not an integer number of "true tiles"
per "macro tile."
When this scenario arises, we simply pad the larger dimension of the grid such that
there are an integer number of "true tiles" per "macro tile." Thus, the 3x5 grid in
the example above will be treated as a 3x6 grid. Row and column positions for each
tile are calculated as above. Any threadblocks that map to tiles that are outside the
problem range or upper/lower triangular portion (e.g., (2, 5)) will exit early from
this problem and may proceed to the next problem in the group.
Handling upper-triangular matrices
----------------------------------
The only modification needed for upper-triangular matrices is to swap i_macro and j_macro
in the calculations above.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Helpers for calculating offsets for Rank2K problem visitor. These helpers specifically pertain
// to the conversion from "macro tiles" to "true tiles" in the description above.
//
template <
typename ThreadblockShape,
typename Enable = void
>
struct Rank2KGroupedProblemVisitorOffsetHelper;
// Partial specialization for the case where threadblock shape M > threadblock shape N
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM > ThreadblockShape::kN) >::type
> {
static_assert(ThreadblockShape::kM % ThreadblockShape::kN == 0,
"Rank2KGroupedProblemVisitor with threadblock shape M > threadblock shape N "
"requires that threadblock shape M be a multiple of threadblock shape N.");
static int32_t const kThreadblockSkewRatio = ThreadblockShape::kM / ThreadblockShape::kN;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.m();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return row;
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return (col * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio);
}
};
// Partial specialization for the case where threadblock shape M < threadblock shape N
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM < ThreadblockShape::kN) >::type
> {
static_assert(ThreadblockShape::kN % ThreadblockShape::kM == 0,
"Rank2KGroupedProblemVisitor with threadblock shape M < threadblock shape N "
"requires that threadblock shape N be a multiple of threadblock shape M.");
static int32_t const kThreadblockSkewRatio = ThreadblockShape::kN / ThreadblockShape::kM;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.n();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return (row * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio);
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return col;
}
};
// Partial specialization for the case where threadblock shape M == threadblock shape N
// In this case, macro tiles are equivalent to true tiles, so the conversions are
// identity functions.
template <
typename ThreadblockShape
>
struct Rank2KGroupedProblemVisitorOffsetHelper<
ThreadblockShape,
typename platform::enable_if< (ThreadblockShape::kM == ThreadblockShape::kN) >::type
> {
static int32_t const kThreadblockSkewRatio = 1;
CUTLASS_HOST_DEVICE
static int32_t min_dim(cutlass::gemm::GemmCoord grid) {
return grid.m();
}
CUTLASS_HOST_DEVICE
static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) {
return row;
}
CUTLASS_HOST_DEVICE
static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) {
return col;
}
};
// Helper for correctly representing problem sizes in grouped kernels
template <typename ThreadblockShape>
struct Rank2KGroupedProblemSizeHelper {
using OffsetHelper = Rank2KGroupedProblemVisitorOffsetHelper<ThreadblockShape>;
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return cutlass::gemm::GemmCoord(
((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM),
((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN),
1);
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
// Return the number of tiles at or below the diagonal (or at and above
// for mode kUpper). We do this by first calculating this value assuming
// we have a square matrix of tiles of size `dim x dim` where `dim` is the
// minimum among {grid.m(), grid.n()}. We then multiply the resulting value
// by OffsetHelper::kThreadblockSkewRatio to account for cases in which there
// are more tiles in one dimension than the other.
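    // Illustrative example: for the 2x4 grid produced by a 64x32 threadblock shape
    // (see the file-level comment), min_dim(grid) == 2, giving
    // (2 + 1) * kThreadblockSkewRatio == 3 * 2 == 6 tiles -- the entries {0..5} of
    // the "true grid" shown there.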
int32_t dim = OffsetHelper::min_dim(grid);
int32_t tiles_on_diagonal = dim;
int32_t tiles_below_diagonal = ((dim * (dim - 1)) / 2);
return (tiles_on_diagonal + tiles_below_diagonal) * OffsetHelper::kThreadblockSkewRatio;
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Default problem visitor for fill modes kUpper and kLower.
//
template <typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount,
cutlass::FillMode FillModeC>
struct Rank2KGroupedProblemVisitor : public GroupedProblemVisitor<
detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount> {
static cutlass::FillMode const kFillModeC = FillModeC;
static_assert(kFillModeC == cutlass::FillMode::kLower || kFillModeC == cutlass::FillMode::kUpper,
"Default Rank2KGroupedProblemVisitor requires fill mode of kLower or kUpper.");
using ProblemSizeHelper = detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>;
using Base = GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount>;
using OffsetHelper = typename ProblemSizeHelper::OffsetHelper;
using Params = typename Base::Params;
using SharedStorage = typename Base::SharedStorage;
//
// Methods
//
CUTLASS_DEVICE
Rank2KGroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, shared_storage_, block_idx)
{}
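  // The mapping below follows the derivation in the file-level comment. Illustrative
  // check (assumed 64x32 threadblock shape, so kThreadblockSkewRatio == 2): for
  // threadblock_id == 5, macro_id == 2, macro_row == 1, macro_col == 1, and with
  // FillMode::kLower the returned coordinate is (row, col) == (1, 3) -- tile 5 in
  // the "true grid" example.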
CUTLASS_DEVICE
cutlass::gemm::GemmCoord threadblock_offset(int32_t threadblock_id) const {
int32_t macro_id = threadblock_id / OffsetHelper::kThreadblockSkewRatio;
int32_t macro_row = ceil(cutlass::fast_sqrt((2*macro_id) + 2.25) - 0.5) - 1;
int32_t macro_col = macro_id - (((macro_row+1) * macro_row)/2);
if (kFillModeC == cutlass::FillMode::kUpper) {
swap(macro_row, macro_col);
}
int32_t row = OffsetHelper::macro_row_to_row(macro_row, threadblock_id);
int32_t col = OffsetHelper::macro_col_to_col(macro_col, threadblock_id);
return cutlass::gemm::GemmCoord(row, col, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 16,100 | C | 41.708223 | 105 | 0.632733 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/params_universal_base.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base functionality for common types of universal GEMM kernel parameters
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Argument structure
struct UniversalArgumentsBase
{
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
int64_t batch_stride_D;
//
// Methods
//
UniversalArgumentsBase() :
mode(GemmUniversalMode::kGemm),
batch_count(1),
batch_stride_D(0)
{}
/// constructs an arguments structure
UniversalArgumentsBase(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
int64_t batch_stride_D)
:
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
batch_stride_D(batch_stride_D)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
};
/// Parameters structure
template <
typename ThreadblockSwizzle,
typename ThreadblockShape,
typename ElementA,
typename ElementB,
typename ElementC>
struct UniversalParamsBase
{
//
// Data members
//
GemmCoord problem_size;
GemmCoord grid_tiled_shape;
int swizzle_log_tile;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
int64_t batch_stride_D;
int *semaphore;
//
// Host dispatch API
//
/// Default constructor
UniversalParamsBase() = default;
/// Constructor
UniversalParamsBase(
UniversalArgumentsBase const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
problem_size(args.problem_size),
mode(args.mode),
batch_count(args.batch_count),
batch_stride_D(args.batch_stride_D),
semaphore(nullptr)
{
ThreadblockSwizzle swizzle;
// Get GEMM volume in thread block tiles
grid_tiled_shape = swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
swizzle_log_tile = swizzle.get_log_tile(grid_tiled_shape);
// Determine extent of K-dimension assigned to each block
gemm_k_size = args.problem_size.k();
if (args.mode == GemmUniversalMode::kGemm || args.mode == GemmUniversalMode::kGemmSplitKParallel)
{
int const kAlignK = const_max(const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value), 1);
gemm_k_size = round_up(ceil_div(args.problem_size.k(), args.batch_count), kAlignK);
if (gemm_k_size) {
grid_tiled_shape.k() = ceil_div(args.problem_size.k(), gemm_k_size);
}
}
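    // Illustrative example (assumed half_t A and B): kAlignK == max(128/16, 128/16) == 8.
    // With problem k == 1000 and batch_count == 3 split-K slices, ceil_div(1000, 3) == 334
    // rounds up to gemm_k_size == 336, and grid_tiled_shape.k() becomes
    // ceil_div(1000, 336) == 3.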
}
/// Returns the workspace size (in bytes) needed for this problem geometry
size_t get_workspace_size() const
{
size_t workspace_bytes = 0;
if (mode == GemmUniversalMode::kGemmSplitKParallel)
{
// Split-K parallel always requires a temporary workspace
workspace_bytes =
sizeof(ElementC) *
size_t(batch_stride_D) *
size_t(grid_tiled_shape.k());
}
else if (mode == GemmUniversalMode::kGemm && grid_tiled_shape.k() > 1)
{
// Serial split-K only requires a temporary workspace if the number of partitions along the
// GEMM K dimension is greater than one.
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
return workspace_bytes;
}
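  // Illustrative sizing: under serial split-K (kGemm with more than one K partition),
  // a 4x4 grid of output tiles needs 16 * sizeof(int) bytes -- one semaphore lock per
  // output tile -- whereas kGemmSplitKParallel stores one D-shaped partial
  // (batch_stride_D elements of ElementC) per K partition for a later reduction pass.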
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
semaphore = static_cast<int *>(workspace);
// Zero-initialize entire workspace
if (semaphore)
{
size_t workspace_bytes = get_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << workspace_bytes << " workspace bytes");
cudaError_t result = cudaMemsetAsync(
semaphore,
0,
workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Returns the GEMM volume in thread block tiles
GemmCoord get_tiled_shape() const
{
return grid_tiled_shape;
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return ThreadblockSwizzle().get_grid_shape(grid_tiled_shape);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,148 | C | 28.060975 | 122 | 0.629687 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_k_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Kernel computing a universal rank-k update in which only the kLower or kUpper
           triangular portion of the output is updated.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
FillMode FillModeC_ ///! Fill Mode for C (kLower or kUpper)
>
struct RankKUniversal {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static FillMode const kFillModeC = FillModeC_;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = 128 / sizeof_bits<ElementA>::value;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_C;
int64_t batch_stride_D;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_C(nullptr), ptr_D(nullptr) { }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
      // For a rank-k update, operand B aliases operand A, so its leading dimension is lda.
      lda(lda), ldb(lda), ldc(ldc), ldd(ldd) {
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A(0),
params_B(0),
params_C(0),
params_D(0),
batch_count(0),
gemm_k_size(0),
mode(cutlass::gemm::GemmUniversalMode::kGemm),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
batch_stride_A(0),
batch_stride_B(0),
batch_stride_C(0),
batch_stride_D(0),
semaphore(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(args.lda),
params_B(args.lda),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_A)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_A),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_A);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
RankKUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Early exit if Fill Mode is Lower and
// if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal)
if (kFillModeC == cutlass::FillMode::kLower &&
(threadblock_tile_offset.m() + 1) * Mma::Shape::kM <= threadblock_tile_offset.n() * Mma::Shape::kN) {
return;
}
// Early exit if Fill Mode is Upper and
// if the entire tile is below the main diagonal (top-right corner is at or below the diagonal)
if (kFillModeC == cutlass::FillMode::kUpper &&
threadblock_tile_offset.m() * Mma::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) {
return;
}
bool tile_on_diagonal = false;
// Mark tiles that are being crossed by the main diagonal
// (top-right and bottom-left corners are on either side of the diagonal)
if ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM > threadblock_tile_offset.n() * Mma::Shape::kN
&& threadblock_tile_offset.m() * Mma::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) {
tile_on_diagonal = true;
}
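    // Illustrative example (assumed 128x128 threadblock tile, kFillModeC == kLower):
    // the CTA at tile (m, n) == (1, 3) exits above because 2*128 <= 3*128; the CTA at
    // (3, 1) lies fully below the diagonal and runs as an ordinary full tile; the CTA
    // at (2, 2) straddles the diagonal, so tile_on_diagonal is set and the fill mode
    // is applied in the epilogue below.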
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
    // Compute the number of threadblock-scoped mainloop iterations along the K dimension
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// If CTA not on diagonal, FillMode doesn't apply.
FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
kFillModeCTA
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
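    // Serial split-K handshake performed below (illustrative walk-through for
    // grid_tiled_shape.k() == 3): partition k == 0 stores its partial tile to D and
    // releases lock value 1; partition k == 1 waits for 1, re-reads D as its source,
    // accumulates, and releases 2; the final partition waits for 2, writes the
    // completed tile, and resets the lock to 0 for subsequent launches.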
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 17,567 | C | 30.038869 | 111 | 0.630785 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_symm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/symm_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_trmm.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A (kLeft or kRight)
SideMode kSideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode kFillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric>
struct DefaultSymm;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode kSideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode kFillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of A matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultSymm<
ElementA, LayoutA, kSideModeA, kFillModeA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC,layout::RowMajor,
ElementAccumulator, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - with diagonal: alpha * A * B or alpha * B * A
static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
using Mma1 = typename cutlass::gemm::threadblock::DefaultTrmm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
kSideModeA, kFillModeA, kDiagTypeMma1,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
Stages, Operator>::ThreadblockMma;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT
static const DiagType kDiagTypeMma2 = DiagType::kZero;
using LayoutAMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
typename layout::LayoutTranspose<LayoutA>::type,
LayoutA
>::type;
using LayoutBMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
LayoutB,
typename layout::LayoutTranspose<LayoutB>::type
>::type;
using Mma2 = typename cutlass::gemm::threadblock::DefaultTrmm<
ElementA, LayoutAMma2, kAlignmentA,
ElementB, LayoutBMma2, kAlignmentB,
kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
Stages, Operator>::ThreadblockMma;
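  // Note (illustrative): for kSideModeA == SideMode::kLeft with a lower-filled
  // symmetric A, alpha * A * B is assembled from Mma1 (the stored lower triangle,
  // diagonal included) plus Mma2 (the transposed strictly-lower triangle selected
  // by the inverted fill mode and DiagType::kZero), with both threadblock MMAs
  // accumulating into the same output tile.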
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level SYMM/HEMM operator.
using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode kSideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode kFillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of A matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultSymm<
ElementA, LayoutA, kSideModeA, kFillModeA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC,layout::RowMajor,
ElementAccumulator, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - with diagonal: alpha * A * B or alpha * B * A
static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
using Mma1 = typename cutlass::gemm::threadblock::DefaultTrmm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
kSideModeA, kFillModeA, kDiagTypeMma1,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
Stages, Operator>::ThreadblockMma;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT
static const DiagType kDiagTypeMma2 = DiagType::kZero;
using LayoutAMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
typename layout::LayoutTranspose<LayoutA>::type,
LayoutA
>::type;
using LayoutBMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
LayoutB,
typename layout::LayoutTranspose<LayoutB>::type
>::type;
using Mma2 = typename cutlass::gemm::threadblock::DefaultTrmm<
ElementA, LayoutAMma2, kAlignmentA,
ElementB, LayoutBMma2, kAlignmentB,
kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
Stages, Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level SYMM/HEMM operator.
using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 13,375 | C | 40.540373 | 109 | 0.660112 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_trmm_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/trmm_universal.h"
#include "cutlass/gemm/kernel/default_trmm.h"
#include "cutlass/gemm/kernel/default_trmm_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator,
///
typename Enable = void
>
struct DefaultTrmmUniversal;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued TRMM kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator>
struct DefaultTrmmUniversal<
ElementA,
LayoutA,
ComplexTransform::kNone, // transform A
kAlignmentA,
ElementB,
LayoutB,
ComplexTransform::kNone, // transform B
kAlignmentB,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultTrmmKernel = typename kernel::DefaultTrmm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator
>::TrmmKernel;
/// Define the kernel in terms of the default kernel
using TrmmKernel = kernel::TrmmUniversal<
typename DefaultTrmmKernel::Mma,
typename DefaultTrmmKernel::Epilogue,
ThreadblockSwizzle,
kSideMode,
kFillMode,
kDiagType
>;
};
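//
// Usage sketch (illustrative only; every concrete type, shape, and stage count below is an
// assumption picked for illustration). A real-valued accumulator selects this
// specialization, which forwards to kernel::DefaultTrmm and re-wraps the resulting Mma and
// Epilogue in kernel::TrmmUniversal:
//
//   using ExampleTrmmKernel = cutlass::gemm::kernel::DefaultTrmmUniversal<
//     cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
//     cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
//     cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit,
//     cutlass::half_t, cutlass::layout::RowMajor,
//     float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 32>,
//     cutlass::gemm::GemmShape<64, 64, 32>,
//     cutlass::gemm::GemmShape<16, 8, 16>,
//     cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     4, false, cutlass::arch::OpMultiplyAdd>::TrmmKernel;
//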
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued TRMM kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Side Mode for the kernel
SideMode kSideMode,
/// Fill Mode for the triangular matrix
FillMode kFillMode,
/// Diag Type for the triangular matrix
DiagType kDiagType,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by TRMM
typename Operator
>
struct DefaultTrmmUniversal<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultTrmmKernel = typename kernel::DefaultTrmmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
kSideMode,
kFillMode,
kDiagType,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
TransformA,
TransformB,
Operator,
SplitKSerial
>::TrmmKernel;
/// Define the kernel in terms of the default kernel
using TrmmKernel = kernel::TrmmUniversal<
typename DefaultTrmmKernel::Mma,
typename DefaultTrmmKernel::Epilogue,
ThreadblockSwizzle,
kSideMode,
kFillMode,
kDiagType
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,850 | C | 29.141667 | 100 | 0.660645 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_rank_2k_grouped.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level grouped Rank2K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h"
#include "cutlass/gemm/kernel/default_rank_2k.h"
#include "cutlass/gemm/kernel/default_rank_2k_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
    /// Operation performed by Rank2K update
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
///
typename Enable = void
>
struct DefaultRank2KGrouped;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued grouped Rank2K
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
    /// Operation performed by Rank2K update
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_
>
struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
ElementB, LayoutB, TransformB, kAlignmentB,
ElementC, LayoutC,
FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::Rank2KMapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
FillModeC,
kInternalTranspose
>;
// Define the default grouped Rank2K kernel
using DefaultRank2Kkernel = typename kernel::DefaultRank2K<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
MapArguments::kAlignmentA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
MapArguments::kAlignmentB,
ElementC,
typename MapArguments::LayoutC,
MapArguments::kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
false, // SplitKSerial
Operator,
BlasMode_
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
using Rank2Kkernel = kernel::Rank2KGrouped<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
TransformA,
TransformB,
DefaultRank2Kkernel::kFillModeC,
DefaultRank2Kkernel::kBlasMode,
GroupScheduleMode_,
kInternalTranspose
>;
};
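//
// Hypothetical instantiation (illustrative only; the element types, tile shapes, epilogue,
// swizzle, and stage count are assumptions). With a column-major LayoutC, kInternalTranspose
// becomes true, so Rank2KMapArguments exchanges and transposes A and B before they reach
// the underlying DefaultRank2K, which always writes a row-major D through its epilogue:
//
//   using ExampleGroupedRank2K = cutlass::gemm::kernel::DefaultRank2KGrouped<
//     double, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 1,
//     double, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 1,
//     double, cutlass::layout::ColumnMajor, cutlass::FillMode::kLower,
//     double, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<64, 64, 16>, cutlass::gemm::GemmShape<32, 32, 16>,
//     cutlass::gemm::GemmShape<8, 8, 4>,
//     cutlass::epilogue::thread::LinearCombination<double, 1, double, double>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     4, cutlass::arch::OpMultiplyAdd>::Rank2Kkernel;
//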
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued grouped Rank2K
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
    /// Operation performed by Rank2K update
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_
>
struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
ElementB, LayoutB, TransformB, kAlignmentB,
ElementC, LayoutC,
FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::Rank2KMapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
FillModeC,
kInternalTranspose
>;
// Define the default grouped Rank2K kernel
using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
ElementC,
typename MapArguments::LayoutC,
MapArguments::kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MapArguments::kTransformA,
MapArguments::kTransformB,
Operator,
false, // SplitKSerial
BlasMode_
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
/// Pass through the user-provided TransformA and TransformB so as to
/// correctly set public-facing TransformA and TransformB in kernel::Rank2KGrouped.
/// This is needed because kernel::DefaultRank2KComplex may change TransformA and
/// TransformB that become template arguments to Mma1 and Mma2.
using Rank2Kkernel = kernel::Rank2KGrouped<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
TransformA,
TransformB,
DefaultRank2Kkernel::kFillModeC,
DefaultRank2Kkernel::kBlasMode,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 12,470 | C | 34.030899 | 100 | 0.673857 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_batched.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined batched GEMM kernel. Batching is handled by stepping the
      CUDA grid's Z dimension over batch indices; split-K is not supported.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmBatched {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
int64_t stride_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
int64_t stride_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
int64_t stride_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
int64_t stride_D;
typename OutputOp::Params epilogue;
int batch_count;
int gemm_k_iterations;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() : swizzle_log_tile(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size_,
cutlass::gemm::GemmCoord const & grid_tiled_shape_,
typename Mma::IteratorA::TensorRef ref_A_,
int64_t stride_A_,
typename Mma::IteratorB::TensorRef ref_B_,
int64_t stride_B_,
typename Epilogue::OutputTileIterator::TensorRef ref_C_,
int64_t stride_C_,
typename Epilogue::OutputTileIterator::TensorRef ref_D_,
int64_t stride_D_,
typename OutputOp::Params epilogue_,
int batch_count_
):
problem_size(problem_size_),
grid_tiled_shape(grid_tiled_shape_),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A_.layout()),
ref_A(ref_A_),
stride_A(stride_A_),
params_B(ref_B_.layout()),
ref_B(ref_B_),
stride_B(stride_B_),
params_C(ref_C_.layout()),
ref_C(ref_C_),
stride_C(stride_C_),
params_D(ref_D_.layout()),
ref_D(ref_D_),
stride_D(stride_D_),
epilogue(epilogue_),
batch_count(batch_count_),
gemm_k_iterations((problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK) {
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
GemmBatched() { }
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
    // Each CTA handles multiple batch indices to accommodate the limited range of the CUDA grid's Z dimension
for (int batch_idx = threadblock_swizzle.get_batch_idx();
batch_idx < params.batch_count;
batch_idx += gridDim.z) {
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
0
};
cutlass::MatrixCoord tb_offset_B{
0,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
params.problem_size.mk(),
thread_idx,
tb_offset_A);
iterator_A.add_pointer_offset(params.stride_A * batch_idx);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
params.problem_size.kn(),
thread_idx,
tb_offset_B);
iterator_B.add_pointer_offset(params.stride_B * batch_idx);
//
// Main loop
//
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
OutputOp output_op(params.epilogue);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
iterator_C.add_pointer_offset(params.stride_C * batch_idx);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
iterator_D.add_pointer_offset(params.stride_D * batch_idx);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// run efficient epilogue
epilogue(output_op, iterator_D, accumulators, iterator_C);
}
}
};
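//
// Host-side launch sketch (illustrative only). "GemmBatchedKernel" stands for a concrete
// instantiation of this template; the swizzle calls and the cutlass::Kernel launcher mirror
// what the device-level wrappers do:
//
//   typename GemmBatchedKernel::ThreadblockSwizzle swizzle;
//   cutlass::gemm::GemmCoord grid_shape = swizzle.get_tiled_shape(
//       problem_size,
//       {GemmBatchedKernel::Mma::Shape::kM,
//        GemmBatchedKernel::Mma::Shape::kN,
//        GemmBatchedKernel::Mma::Shape::kK},
//       batch_count);
//
//   typename GemmBatchedKernel::Params params(
//       problem_size, grid_shape,
//       ref_A, stride_A, ref_B, stride_B,
//       ref_C, stride_C, ref_D, stride_D,
//       epilogue_params, batch_count);
//
//   cutlass::Kernel<GemmBatchedKernel><<<
//       swizzle.get_grid_shape(grid_shape),
//       dim3(GemmBatchedKernel::kThreadCount, 1, 1),
//       sizeof(typename GemmBatchedKernel::SharedStorage)>>>(params);
//
// Each threadblock then strides over batch indices in steps of gridDim.z and advances the
// A/B/C/D pointers by stride_{A,B,C,D} * batch_idx.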
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 8,785 | C | 30.378571 | 102 | 0.625043 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_array.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined GEMM kernel operating on arrays of operand pointers
      (batched GEMM). Batching is handled by stepping the CUDA grid's Z dimension over batch
      indices; split-K is not supported.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmArray {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::Element const * const * ptr_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::Element const * const * ptr_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Element const * const * ptr_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::Element * const * ptr_D;
int64_t stride_D;
typename OutputOp::Params epilogue;
int batch_count;
int gemm_k_iterations;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() :
swizzle_log_tile(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size_,
cutlass::gemm::GemmCoord const & grid_tiled_shape_,
typename Mma::IteratorA::Element const * const * ptr_A_,
typename Mma::IteratorA::Layout layout_A,
typename Mma::IteratorB::Element const * const * ptr_B_,
typename Mma::IteratorB::Layout layout_B,
typename Epilogue::OutputTileIterator::Element const * const * ptr_C_,
typename Epilogue::OutputTileIterator::Layout layout_C,
typename Epilogue::OutputTileIterator::Element * const * ptr_D_,
typename Epilogue::OutputTileIterator::Layout layout_D,
typename OutputOp::Params epilogue_,
int batch_count_
):
problem_size(problem_size_),
grid_tiled_shape(grid_tiled_shape_),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(layout_A),
ptr_A(ptr_A_),
params_B(layout_B),
ptr_B(ptr_B_),
params_C(layout_C),
ptr_C(ptr_C_),
params_D(layout_D),
ptr_D(ptr_D_),
epilogue(epilogue_),
batch_count(batch_count_),
gemm_k_iterations((problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK) {
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
GemmArray() { }
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
    // Each CTA handles multiple batch indices to accommodate the limited range of the CUDA grid's Z dimension
for (int batch_idx = threadblock_swizzle.get_batch_idx();
batch_idx < params.batch_count;
batch_idx += gridDim.z) {
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
0
};
cutlass::MatrixCoord tb_offset_B{
0,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
const_cast<typename Mma::IteratorA::Element *>(params.ptr_A[batch_idx]),
params.problem_size.mk(),
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
const_cast<typename Mma::IteratorB::Element *>(params.ptr_B[batch_idx]),
params.problem_size.kn(),
thread_idx,
tb_offset_B);
//
// Main loop
//
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
OutputOp output_op(params.epilogue);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
const_cast<typename Epilogue::OutputTileIterator::Element *>(params.ptr_C[batch_idx]),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ptr_D[batch_idx],
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// run efficient epilogue
epilogue(output_op, iterator_D, accumulators, iterator_C);
}
}
};
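//
// This kernel mirrors GemmBatched, but each batch index selects a pointer from the
// ptr_A / ptr_B / ptr_C / ptr_D arrays rather than applying a fixed stride to one base
// pointer. Host-side sketch (illustrative only; the pointer arrays are assumed to already
// live in device memory, one entry per batch):
//
//   typename GemmArrayKernel::ThreadblockSwizzle swizzle;
//   cutlass::gemm::GemmCoord grid_shape = swizzle.get_tiled_shape(
//       problem_size,
//       {GemmArrayKernel::Mma::Shape::kM,
//        GemmArrayKernel::Mma::Shape::kN,
//        GemmArrayKernel::Mma::Shape::kK},
//       batch_count);
//
//   typename GemmArrayKernel::Params params(
//       problem_size, grid_shape,
//       d_ptr_A, layout_A,
//       d_ptr_B, layout_B,
//       d_ptr_C, layout_C,
//       d_ptr_D, layout_D,
//       epilogue_params, batch_count);
//
//   cutlass::Kernel<GemmArrayKernel><<<
//       swizzle.get_grid_shape(grid_shape),
//       dim3(GemmArrayKernel::kThreadCount, 1, 1),
//       sizeof(typename GemmArrayKernel::SharedStorage)>>>(params);
//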
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 8,717 | C | 31.898113 | 102 | 0.630492 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_symm_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/symm_universal.h"
#include "cutlass/gemm/kernel/default_symm.h"
#include "cutlass/gemm/kernel/default_symm_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
    /// Operation performed by SYMM/HEMM
typename Operator,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric,
///
typename Enable = void
>
struct DefaultSymmUniversal;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued SYMM/HEMM update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYMM/HEMM
typename Operator>
struct DefaultSymmUniversal<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric,
typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultSymmkernel = typename kernel::DefaultSymm<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric
>::SymmKernel;
/// Define the kernel in terms of the default kernel
using SymmKernel = kernel::SymmUniversal<
typename DefaultSymmkernel::Mma1,
typename DefaultSymmkernel::Mma2,
typename DefaultSymmkernel::Epilogue,
ThreadblockSwizzle,
SideModeA,
FillModeA
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued SYMM/HEMM update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
    /// Operation performed by SYMM/HEMM
typename Operator,
// BlasMode
BlasMode kBlasMode
>
struct DefaultSymmUniversal<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
kBlasMode,
typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultSymmkernel = typename kernel::DefaultSymmComplex<
ElementA,
LayoutA,
SideModeA,
FillModeA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
SplitKSerial,
kBlasMode
>::SymmKernel;
/// Define the kernel in terms of the default kernel
using SymmKernel = kernel::SymmUniversal<
typename DefaultSymmkernel::Mma1,
typename DefaultSymmkernel::Mma2,
typename DefaultSymmkernel::Epilogue,
ThreadblockSwizzle,
SideModeA,
FillModeA
>;
};
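//
// Usage sketch (illustrative only; every concrete type, shape, and stage count below is an
// assumption). A complex accumulator selects the specialization above, and
// BlasMode::kHermitian turns the operation into HEMM:
//
//   using ExampleHemmKernel = cutlass::gemm::kernel::DefaultSymmUniversal<
//     cutlass::complex<double>, cutlass::layout::RowMajor,
//     cutlass::SideMode::kLeft, cutlass::FillMode::kLower, 1,
//     cutlass::complex<double>, cutlass::layout::RowMajor, 1,
//     cutlass::complex<double>, cutlass::layout::RowMajor,
//     cutlass::complex<double>, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<32, 32, 16>, cutlass::gemm::GemmShape<16, 16, 16>,
//     cutlass::gemm::GemmShape<8, 8, 4>,
//     cutlass::epilogue::thread::LinearCombination<
//         cutlass::complex<double>, 1,
//         cutlass::complex<double>, cutlass::complex<double>>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     3, false, cutlass::arch::OpMultiplyAddComplex,
//     cutlass::BlasMode::kHermitian>::SymmKernel;
//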
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,315 | C | 29.075802 | 100 | 0.663209 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a pipelined GEMM kernel. Does not compute batching; split-K via
      serial reduction is supported when the kernel is instantiated with SplitKSerial enabled.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "cutlass/arch/arch.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct Gemm {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename OutputOp::Params output_op;
int *semaphore;
int gemm_k_size;
// For gather+scatter operations
int const *gather_A_indices;
int const *gather_B_indices;
int const *scatter_D_indices;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), semaphore(0), gemm_k_size(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr,
int const *gather_A_indices = nullptr,
int const *gather_B_indices = nullptr,
int const *scatter_D_indices = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
output_op(output_op),
gather_A_indices(gather_A_indices),
gather_B_indices(gather_B_indices),
scatter_D_indices(scatter_D_indices) {
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
semaphore = workspace;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
Gemm() { }
/// Determines whether kernel satisfies alignment
CUTLASS_HOST_DEVICE
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D) {
static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorA::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Mma::IteratorB::Layout,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<typename Epilogue::OutputTileIterator::Layout,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<typename Epilogue::OutputTileIterator::Layout,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.gather_B_indices);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (!kSplitKSerial || gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
}
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.scatter_D_indices
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.scatter_D_indices
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
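//
// Host-side sketch (illustrative only). "GemmKernel" stands for a concrete instantiation of
// this template. When kSplitKSerial is enabled and the grid is split along K, `workspace`
// must point to one zero-initialized int lock per output tile so the semaphore exchange
// above has somewhere to live:
//
//   if (GemmKernel::can_implement(problem_size, ref_A, ref_B, ref_C, ref_D) !=
//       cutlass::Status::kSuccess) {
//     // report misaligned operands or fall back to another kernel
//   }
//
//   typename GemmKernel::ThreadblockSwizzle swizzle;
//   cutlass::gemm::GemmCoord grid_shape = swizzle.get_tiled_shape(
//       problem_size,
//       {GemmKernel::Mma::Shape::kM, GemmKernel::Mma::Shape::kN, GemmKernel::Mma::Shape::kK},
//       split_k_slices);
//
//   typename GemmKernel::Params params(
//       problem_size, grid_shape, ref_A, ref_B, ref_C, ref_D,
//       epilogue_params, workspace);
//
//   cutlass::Kernel<GemmKernel><<<
//       swizzle.get_grid_shape(grid_shape),
//       dim3(GemmKernel::kThreadCount, 1, 1),
//       sizeof(typename GemmKernel::SharedStorage)>>>(params);
//
// (If SharedStorage exceeds the default 48 KB dynamic shared memory limit, the launcher
// must first raise the limit with cudaFuncSetAttribute.)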
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 13,381 | C | 34.123359 | 108 | 0.615649 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_sparse.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
    Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
    accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/sparse_gemm.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h"
#include "cutlass/gemm/threadblock/default_sparse_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultSparseGemm;
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultSparseGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultSparseMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::SparseGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
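// Illustrative sketch (editorial addition, not part of the original header): one possible
// instantiation of the Sm80 specialization above. The element types, alignments, tile shapes,
// and stage count below are assumptions chosen only to show the parameter ordering.
//
//   using SparseKernel = typename cutlass::gemm::kernel::DefaultSparseGemm<
//       cutlass::half_t, cutlass::layout::RowMajor, 8,       // A
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,    // B
//       cutlass::half_t, cutlass::layout::RowMajor,          // C/D
//       float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 64>,              // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 64>,                // warp tile
//       cutlass::gemm::GemmShape<16, 8, 32>,                 // sparse tensor-op instruction
//       cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3, /*SplitKSerial=*/false, cutlass::arch::OpMultiplyAdd>::GemmKernel;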
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 7,983 | C | 40.583333 | 100 | 0.683828 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/trmm_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Universal kernel-level TRMM: combines a threadblock-scoped triangular matrix
    multiply-accumulate with an epilogue, supporting batched and split-K modes.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/core_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight)
FillMode FillMode_, ///! Fill Mode for triangular matrix (kLower or kUpper)
DiagType DiagType_ ///! Diag Type for triangular matrix (kNonUnit or kUnit)
>
struct TrmmUniversal {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static SideMode const kSideMode = SideMode_;
static FillMode const kFillMode = FillMode_;
static DiagType const kDiagType = DiagType_;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
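  // For example (illustrative arithmetic only): with 16-bit ElementA and ElementB this evaluates
  // to const_max(128 / 16, 128 / 16) = 8 elements, so split-K boundaries fall on 128-bit multiples.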
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_D;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldd;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_B(nullptr), ptr_D(nullptr) { }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldd(ldd) {
}
/// Returns arguments for the transposed problem sizes
Arguments transposed_problem_size() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
return args;
}
/// Returns arguments for the transposed matrices
Arguments swapped_matrices() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
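  // Illustrative sketch (editorial addition, not part of the original header): how a host-side
  // caller of a concrete instantiation 'TrmmKernel' might populate Arguments for a single,
  // non-batched left-side TRMM. The names d_A, d_B, d_D, lda, ldb, ldd, and alpha are
  // assumptions made only for this example.
  //
  //   TrmmKernel::Arguments args(
  //       cutlass::gemm::GemmUniversalMode::kGemm,
  //       {m, n, m},          // problem size; k equals m when A is the left-side triangular operand
  //       1,                  // batch_count
  //       {alpha},            // epilogue parameters (beta is implicitly zero for TRMM)
  //       d_A, d_B, d_D,      // device pointers for A (triangular), B, and D
  //       0, 0, 0,            // batch strides (unused when batch_count == 1)
  //       lda, ldb, ldd);     // leading dimensions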
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A(0),
params_B(0),
params_D(0),
batch_count(0),
gemm_k_size(0),
mode(cutlass::gemm::GemmUniversalMode::kGemm),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_D(nullptr),
batch_stride_A(0),
batch_stride_B(0),
batch_stride_D(0),
semaphore(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(args.lda),
params_B(args.ldb),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
TrmmUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
    /******************************************************************************************************
      First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transposes of each other
      - (Left Side, Lower Fill): compute the bottom edge of the CTA tile, then find the k-iterations
        needed to process all elements up to that coordinate.
      - (Right Side, Upper Fill): compute the right edge of the CTA tile, then find the k-iterations
        needed to process all elements up to that coordinate.
      Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transposes of each other
      - (Left Side, Upper Fill): compute the top edge of the CTA tile, then find the k-iterations
        that can be skipped for all elements of this tile.
      - (Right Side, Lower Fill): compute the left edge of the CTA tile, then find the k-iterations
        that can be skipped for all elements of this tile.
      ********************************************************************************************************/
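    // Worked example (illustrative shapes only): with Mma::Shape = <128, 128, 32> and
    // threadblock_tile_offset.m() == 1:
    //   (Left, Lower): k_iterations_till_diagonal = ((1 + 1) * 128 + 32 - 1) / 32 = 8, so the
    //                  mainloop stops after the 8 k-tiles that reach the diagonal.
    //   (Left, Upper): k_iterations_till_diagonal = (1 * 128) / 32 = 4, so the first 4 k-tiles,
    //                  which lie entirely in the zero half of A, are skipped by advancing
    //                  tb_offset_A / tb_offset_B and shrinking gemm_k_iterations.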
if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kLower) {
int k_iterations_till_diagonal = ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM + Mma::Shape::kK - 1) / Mma::Shape::kK;
if (k_iterations_till_diagonal < gemm_k_iterations) {
gemm_k_iterations = k_iterations_till_diagonal;
}
} else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kUpper) {
int k_iterations_till_diagonal = ((threadblock_tile_offset.n() + 1) * Mma::Shape::kN + Mma::Shape::kK - 1) / Mma::Shape::kK;
if (k_iterations_till_diagonal < gemm_k_iterations) {
gemm_k_iterations = k_iterations_till_diagonal;
}
} else if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kUpper) {
int k_iterations_till_diagonal = ((threadblock_tile_offset.m()) * Mma::Shape::kM) / Mma::Shape::kK;
if (k_iterations_till_diagonal != 0) {
tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK});
tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0});
gemm_k_iterations -= k_iterations_till_diagonal;
}
} else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kLower) {
int k_iterations_till_diagonal = ((threadblock_tile_offset.n()) * Mma::Shape::kN) / Mma::Shape::kK;
if (k_iterations_till_diagonal != 0) {
tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK});
tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0});
gemm_k_iterations -= k_iterations_till_diagonal;
}
}
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor (although irrelevant to this kernel as beta is zero).
typename Epilogue::OutputTileIterator iterator_C(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 19,537 | C | 31.563333 | 130 | 0.623944 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemv.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Kernel-level GEMV in which each thread computes one row of the output; batches are
      mapped to blockIdx.z.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
typename ElementB_,
typename ElementC_,
typename ElementAccumulator_,
typename EpilogueOutputOp_
>
struct Gemv {
public:
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using TensorRefA = TensorRef<ElementA, LayoutA>;
static_assert(platform::is_same<LayoutA, LayoutA_>::value,
"Only supported for column-major A matrix");
using ElementB = ElementB_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using EpilogueOutputOp = EpilogueOutputOp_;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
static int const kThreadCount = 32;
static int const kStages = 1;
static int const kAlignmentA = 1;
static int const kAlignmentB = 1;
static int const kAlignmentC = 1;
//
// Structures
//
/// Argument structure
struct Arguments {
MatrixCoord problem_size;
int32_t batch_count;
typename EpilogueOutputOp::Params output_op;
TensorRefA ref_A;
ElementB const *ptr_B;
ElementC const *ptr_C;
ElementC *ptr_D;
int64_t inc_B;
int64_t inc_C;
int64_t inc_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
//
// Methods
//
Arguments(): batch_count(0) { }
Arguments(
MatrixCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t inc_B,
int64_t inc_C,
int64_t inc_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D
):
problem_size(problem_size),
batch_count(batch_count),
output_op(output_op),
ref_A(ref_A),
ptr_B(static_cast<ElementB const *>(ptr_B)),
ptr_C(static_cast<ElementC const *>(ptr_C)),
ptr_D(static_cast<ElementC *>(ptr_D)),
inc_B(inc_B),
inc_C(inc_C),
inc_D(inc_D),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D)
{ }
Arguments(
MatrixCoord problem_size,
typename EpilogueOutputOp::Params output_op,
TensorRefA ref_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t inc_B,
int64_t inc_C,
int64_t inc_D
):
Arguments(
problem_size,
1,
output_op,
ref_A,
ptr_B,
ptr_C,
ptr_D,
inc_B,
inc_C,
inc_D,
1,
1,
1,
1)
{ }
Status update(Arguments const &args) {
output_op = args.output_op;
      ref_A = args.ref_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
return Status::kSuccess;
}
};
using Params = Arguments;
/// Shared memory storage structure
union SharedStorage {
};
public:
//
// Methods
//
CUTLASS_DEVICE
Gemv() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::MatrixCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Loop over batch indices
for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) {
int i = blockIdx.x * kThreadCount + threadIdx.x;
ElementA const *ptr_A = params.ref_A.data() + i;
ElementB const *ptr_B = params.ptr_B;
ptr_A += batch_idx * params.batch_stride_A;
ptr_B += batch_idx * params.batch_stride_B;
ElementAccumulator accum = ElementAccumulator();
// Compute inner product
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < params.problem_size.column(); ++k) {
// Fetch from A
ElementA a = ElementA();
if (i < params.problem_size.row()) {
a = *ptr_A;
}
ptr_A += params.ref_A.stride(0);
// Fetch from B
ElementB b = *ptr_B;
ptr_B += params.inc_B;
// Math
accum += ElementAccumulator(a) * ElementAccumulator(b);
}
//
// Epilogue phase
//
ElementC const *ptr_C = params.ptr_C + i * params.inc_C + batch_idx * params.batch_stride_C;
ElementC *ptr_D = params.ptr_D + i * params.inc_D + batch_idx * params.batch_stride_D;
EpilogueOutputOp output_op(params.output_op);
typename EpilogueOutputOp::FragmentAccumulator accum_fragment;
typename EpilogueOutputOp::FragmentOutput source_fragment;
typename EpilogueOutputOp::FragmentOutput output_fragment;
accum_fragment[0] = accum;
if (i < params.problem_size.row()) {
if (output_op.is_source_needed()) {
source_fragment[0] = *ptr_C;
output_fragment = output_op(accum_fragment, source_fragment);
}
else {
output_fragment = output_op(accum_fragment);
}
*ptr_D = output_fragment[0];
}
}
}
};
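// Illustrative note (editorial addition, not part of the original header): each thread of a
// kThreadCount-wide block owns one row of A and produces one element of D, so a host-side launch
// would typically use (names assumed for the sketch)
//   grid  = dim3((problem_size.row() + kThreadCount - 1) / kThreadCount, 1, batch_count);
//   block = dim3(kThreadCount, 1, 1);
// e.g. 100 rows map to (100 + 31) / 32 = 4 blocks in x, while blockIdx.z strides over batches.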
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,090 | C | 26.9 | 100 | 0.587515 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemv_batched_strided.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail
{
template<typename ElementAlphaBeta, bool BetaIsZero>
struct GemvBatchedStridedEpilogueScaling
{
ElementAlphaBeta const & alpha;
ElementAlphaBeta const & beta;
CUTLASS_DEVICE
GemvBatchedStridedEpilogueScaling(ElementAlphaBeta& alpha_, ElementAlphaBeta& beta_) :
alpha(alpha_), beta(beta_)
{ }
template<typename FragmentCD, typename FragmentAccumulator>
CUTLASS_DEVICE
void operator()(FragmentAccumulator& accumulators,
FragmentCD const& fragment_C,
FragmentCD& fragment_D) const
{
using AccType = typename FragmentAccumulator::value_type;
using CDType = typename FragmentCD::value_type;
static_assert(FragmentCD::kElements == FragmentAccumulator::kElements,
"Mistmatch in fragment sizes.");
for (int i = 0; i < FragmentCD::kElements; ++i)
{
if (BetaIsZero)
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha));
}
else
{
fragment_D[i] = CDType(accumulators[i] * AccType(alpha)
+ AccType(fragment_C[i]) * AccType(beta));
}
}
}
};
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero=false>
CUTLASS_DEVICE void GemvBatchedStridedDevice(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv;
using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle;
using EpilogueScale = detail::GemvBatchedStridedEpilogueScaling<ElementAlphaBeta, BetaIsZero>;
ThreadBlockSwizzle swizzler;
// Compute initial location in logical coordinates
BatchedGemmCoord tb_offset = swizzler.get_tile_offset();
int const batch_idx = swizzler.get_batch_idx();
// Offset to the batch
ref_A.add_pointer_offset(batch_idx*lda);
ref_B.add_pointer_offset(batch_idx*ldb);
// Construct iterators to A and B operands
typename GemvKernel::IteratorA::Params params_A(ref_A.layout());
typename GemvKernel::IteratorA iterator_A(
params_A,
ref_A.data(),
{ 1, problem_size.k() },
0,
{ 0, 0 });
typename GemvKernel::IteratorB::Params params_B(ref_B.layout());
typename GemvKernel::IteratorB iterator_B(
params_B,
ref_B.data(),
{ problem_size.k(), problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
//
// Main loop
//
// Construct thread-scoped matrix multiply
ThreadBlockGemv mma;
typename ThreadBlockGemv::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped gemv
mma(problem_size.mnk(), accumulators, iterator_A, iterator_B, accumulators);
//
  // Epilogue (TODO: pass the epilogue as a template argument)
//
typename GemvKernel::FragmentCD fragment_CD;
// Load C (skip if beta is zero)
if (!BetaIsZero)
{
tb_offset = swizzler.get_tile_offset();
ref_C.add_pointer_offset(batch_idx*ldc);
typename GemvKernel::IteratorCD::Params params_C(ref_C.layout());
typename GemvKernel::IteratorCD iterator_C(
params_C,
ref_C.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_C.load(fragment_CD);
}
// Apply alpha/beta scaling
EpilogueScale epilogue_scale(alpha, beta);
epilogue_scale(accumulators, fragment_CD, fragment_CD);
// Store D
tb_offset = swizzler.get_tile_offset();
ref_D.add_pointer_offset(batch_idx*ldd);
typename GemvKernel::IteratorCD::Params params_D(ref_D.layout());
typename GemvKernel::IteratorCD iterator_D(
params_D,
ref_D.data(),
{ 1, problem_size.n() },
threadIdx.x,
{ 0, tb_offset.n()*ThreadBlockGemv::Shape::kN });
iterator_D.store(fragment_CD);
}
template <typename GemvKernel, typename ElementAlphaBeta, bool BetaIsZero>
__global__ void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
ElementAlphaBeta beta,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_C,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, BetaIsZero>(
problem_size, alpha, beta, ref_A, lda, ref_B, ldb, ref_C, ldc, ref_D, ldd
);
}
template <typename GemvKernel, typename ElementAlphaBeta>
__global__ void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
ElementAlphaBeta alpha,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, alpha, ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
template <typename GemvKernel>
__global__ void GemvBatchedStrided(
cutlass::gemm::BatchedGemmCoord problem_size,
typename GemvKernel::IteratorA::TensorRef ref_A,
typename GemvKernel::IteratorA::TensorRef::LongIndex lda,
typename GemvKernel::IteratorB::TensorRef ref_B,
typename GemvKernel::IteratorB::TensorRef::LongIndex ldb,
typename GemvKernel::IteratorCD::TensorRef ref_D,
typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd)
{
using ElementAlphaBeta = typename GemvKernel::IteratorCD::Element;
GemvBatchedStridedDevice<GemvKernel, ElementAlphaBeta, true>(
problem_size, ElementAlphaBeta(1), ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd
);
}
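// Illustrative sketch (editorial addition, not part of the original header): launching the
// alpha/beta overload of GemvBatchedStrided declared above. The grid/block configuration is an
// assumption here; in practice it is derived from GemvKernel::ThreadBlockSwizzle and the
// threadblock shape of GemvKernel.
//
//   GemvBatchedStrided<GemvKernel, float, /*BetaIsZero=*/false><<<grid, block, 0, stream>>>(
//       problem_size, alpha, beta,
//       ref_A, lda, ref_B, ldb,
//       ref_C, ldc, ref_D, ldd);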
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 8,979 | C | 35.653061 | 106 | 0.682481 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemv.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/gemm/threadblock/gemv.h"
#include "cutlass/gemm/threadblock/default_gemv_core.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the ThreadBlock tile - concept: gemm::GemmShape<>
typename ThreadBlockShape_,
/// Size of the per-thread shape - concept: gemm::GemmShape<>
typename ThreadShape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C/D matrix
typename ElementCD_,
/// Layout of C/D matrix (concept: MatrixLayout)
typename LayoutCD_,
/// Data type of the accumulator
typename ElementAccumulator_ = ElementCD_>
struct DefaultGemv {
/// Shape of Threadblock-level matrix operation (concept: GemmShape)
using ThreadBlockShape = ThreadBlockShape_;
  /// Shape of per-thread matrix operation (concept: GemmShape)
using ThreadShape = ThreadShape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulators
using ElementAccumulator = ElementAccumulator_;
  /// Layout of accumulators (same as C/D)
using LayoutAccumulator = LayoutCD_;
/// Data type of input/output matrix C/D
using ElementCD = ElementCD_;
/// Layout of input/output matrix C/D
using LayoutCD = LayoutCD_;
// Define the core components
using Core = typename cutlass::gemm::threadblock::DefaultGemvCore<
ThreadBlockShape, ThreadShape, ElementA, LayoutA, ElementB, LayoutB,
ElementAccumulator, LayoutAccumulator>;
// Define the threadblock-scoped gemv
using ThreadBlockGemv = cutlass::gemm::threadblock::Gemv<Core>;
// Iterator for multiplicand A
using IteratorA = typename ThreadBlockGemv::IteratorA;
// Iterator for multiplicand B
using IteratorB = typename ThreadBlockGemv::IteratorB;
/// Policy for the iterator that reads/writes C/D
using IteratorPolicyCD = typename platform::conditional<
platform::is_same<LayoutCD, layout::RowMajor>::value,
cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
layout::PitchLinearShape<ThreadBlockShape::kN, ThreadBlockShape::kM>, Core::kThreadsPerN, ThreadShape::kN>,
cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
layout::PitchLinearShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, Core::kThreadsPerN, ThreadShape::kM>>::type;
/// Iterator that reads/writes C/D
using IteratorCD = cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, ElementCD, LayoutCD, 0, IteratorPolicyCD>;
/// Fragment storage for C/D
using FragmentCD = typename IteratorCD::Fragment;
// Define the threadblock swizzle
using ThreadBlockSwizzle = cutlass::gemm::threadblock::GemvBatchedStridedThreadblockDefaultSwizzle;
};
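// Illustrative sketch (editorial addition, not part of the original header): one possible
// instantiation of DefaultGemv. The threadblock/thread shapes and element types below are
// assumptions chosen only to show the parameter ordering.
//
//   using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
//       cutlass::gemm::GemmShape<1, 64, 4>,     // threadblock shape (M, N, K)
//       cutlass::gemm::GemmShape<1, 4, 4>,      // per-thread shape
//       float, cutlass::layout::ColumnMajor,    // A
//       float, cutlass::layout::ColumnMajor,    // B
//       float, cutlass::layout::ColumnMajor>;   // C/D (accumulator defaults to ElementCD)
//
// The batched strided kernels in gemv_batched_strided.h can then be launched with this type.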
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 5,349 | C | 39.225564 | 124 | 0.688727 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_rank_k_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level Rank k definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/rank_k_universal.h"
#include "cutlass/gemm/kernel/default_rank_k.h"
#include "cutlass/gemm/kernel/default_rank_k_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYRK
typename Operator,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric,
///
typename Enable = void
>
struct DefaultRankKUniversal;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued Rank k update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
  /// Operation performed by the rank-k update
typename Operator>
struct DefaultRankKUniversal<
ElementA,
LayoutA,
ComplexTransform::kNone, // transform A
kAlignmentA,
ElementC,
LayoutC,
FillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric,
typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultRankKkernel = typename kernel::DefaultRankK<
ElementA,
LayoutA,
kAlignmentA,
ElementC,
LayoutC,
FillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric
>::RankKkernel;
/// Define the kernel in terms of the default kernel
using RankKkernel = kernel::RankKUniversal<
typename DefaultRankKkernel::Mma,
typename DefaultRankKkernel::Epilogue,
ThreadblockSwizzle,
FillModeC
>;
};
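// Illustrative note (editorial addition, not part of the original header): the enable_if guards
// on the two partial specializations dispatch purely on the accumulator type, e.g.
//   ElementAccumulator = float                    -> real-valued path via DefaultRankK
//   ElementAccumulator = cutlass::complex<float>  -> complex-valued path via DefaultRankKComplex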
/////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Complex-valued Rank k update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYRK
typename Operator,
  /// Blas3 computation mode (symmetric/hermitian)
BlasMode kBlasMode
>
struct DefaultRankKUniversal<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementC,
LayoutC,
FillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
kBlasMode,
typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultRankKkernel = typename kernel::DefaultRankKComplex<
ElementA,
LayoutA,
ElementC,
LayoutC,
FillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
TransformA,
Operator,
SplitKSerial,
kBlasMode
>::RankKkernel;
/// Define the kernel in terms of the default kernel
using RankKkernel = kernel::RankKUniversal<
typename DefaultRankKkernel::Mma,
typename DefaultRankKkernel::Epilogue,
ThreadblockSwizzle,
FillModeC
>;
};
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,444 | C | 29.866013 | 100 | 0.664549 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_layernorm_mainloop_fusion.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
    Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
    accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h"
#include "cutlass/gemm/threadblock/default_mma_layernorm_mainloop_fusion.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for Scale/Bias vectors
typename ElementScaleBias,
/// Layout type for Scale/Bias vectors
typename LayoutScaleBias,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone>
struct DefaultGemmLayernormMainloopFusion {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMmaLayernormMainloopFusion<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementScaleBias, LayoutScaleBias, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator, false, SharedMemoryClear>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::GemmLayernormMainloopFusion<Mma, Epilogue, ThreadblockSwizzle>;
};
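// Illustrative instantiation (not part of the original header; the element types, alignments, and
// tile shapes below are hypothetical choices shown only to make the parameter ordering concrete):
//
// using LayernormFusedGemmKernel = cutlass::gemm::kernel::DefaultGemmLayernormMainloopFusion<
//     cutlass::half_t, cutlass::layout::RowMajor, 8,        // ElementA, LayoutA, kAlignmentA
//     cutlass::half_t, cutlass::layout::ColumnMajor, 8,     // ElementB, LayoutB, kAlignmentB
//     cutlass::half_t, cutlass::layout::RowMajor,           // ElementScaleBias, LayoutScaleBias
//     cutlass::half_t, cutlass::layout::RowMajor,           // ElementC, LayoutC
//     float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 32>,               // threadblock tile
//     cutlass::gemm::GemmShape<64, 64, 32>,                 // warp tile
//     cutlass::gemm::GemmShape<16, 8, 16>,                  // instruction shape
//     cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     4,                                                    // stages
//     cutlass::arch::OpMultiplyAdd>::GemmKernel;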
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 5,848 | C | 41.384058 | 113 | 0.691518 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage GEMM kernel with layernorm operations fused in mainloop.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmLayernormMainloopFusion {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
using ElementScaleBias = typename Mma::IteratorVarMean::Element;
using LayoutScaleBias = typename Mma::IteratorVarMean::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
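  // Worked example (illustrative assumption about the element types): with half-precision A and B,
  // sizeof_bits is 16, so kSplitKAlignment = max(128 / 16, 128 / 16) = 8 elements; with 32-bit
  // operands it is max(128 / 32, 128 / 32) = 4 elements, i.e. every split remains 128b aligned.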
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_var;
void const * ptr_mean;
void const * ptr_gamma;
void const * ptr_beta;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_var;
int64_t batch_stride_mean;
int64_t batch_stride_gamma;
int64_t batch_stride_beta;
int64_t batch_stride_C;
typename LayoutA::Stride stride_a;
typename LayoutB::Stride stride_b;
typename LayoutScaleBias::Stride stride_var;
typename LayoutScaleBias::Stride stride_mean;
typename LayoutScaleBias::Stride stride_gamma;
typename LayoutScaleBias::Stride stride_beta;
typename LayoutC::Stride stride_c;
typename LayoutC::Stride stride_d;
typename LayoutA::Stride::LongIndex lda;
typename LayoutB::Stride::LongIndex ldb;
typename LayoutScaleBias::Stride::LongIndex ld_var;
typename LayoutScaleBias::Stride::LongIndex ld_mean;
typename LayoutScaleBias::Stride::LongIndex ld_gamma;
typename LayoutScaleBias::Stride::LongIndex ld_beta;
typename LayoutC::Stride::LongIndex ldc;
typename LayoutC::Stride::LongIndex ldd;
int const * ptr_gather_A_indices;
int const * ptr_gather_B_indices;
int const * ptr_scatter_D_indices;
//
// Methods
//
Arguments():
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr),
ptr_var(nullptr), ptr_mean(nullptr),
ptr_gamma(nullptr), ptr_beta(nullptr),
ptr_gather_A_indices(nullptr),
ptr_gather_B_indices(nullptr),
ptr_scatter_D_indices(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_var,
void const * ptr_mean,
void const * ptr_gamma,
void const * ptr_beta,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_var,
int64_t batch_stride_mean,
int64_t batch_stride_gamma,
int64_t batch_stride_beta,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride stride_a,
typename LayoutB::Stride stride_b,
typename LayoutScaleBias::Stride stride_var,
typename LayoutScaleBias::Stride stride_mean,
typename LayoutScaleBias::Stride stride_gamma,
typename LayoutScaleBias::Stride stride_beta,
typename LayoutC::Stride stride_c,
typename LayoutC::Stride stride_d,
int const *ptr_gather_A_indices = nullptr,
int const *ptr_gather_B_indices = nullptr,
int const *ptr_scatter_D_indices = nullptr)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
ptr_var(ptr_var), ptr_mean(ptr_mean),
ptr_gamma(ptr_gamma), ptr_beta(ptr_beta),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
batch_stride_var(batch_stride_var), batch_stride_mean(batch_stride_mean),
batch_stride_gamma(batch_stride_gamma), batch_stride_beta(batch_stride_beta),
lda(0), ldb(0), ldc(0), ldd(0),
ld_var(0), ld_mean(0),
ld_gamma(0), ld_beta(0),
stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d),
stride_var(stride_var), stride_mean(stride_mean),
stride_gamma(stride_gamma), stride_beta(stride_beta),
ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
ptr_scatter_D_indices(ptr_scatter_D_indices)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_var,
void const * ptr_mean,
void const * ptr_gamma,
void const * ptr_beta,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_var,
int64_t batch_stride_mean,
int64_t batch_stride_gamma,
int64_t batch_stride_beta,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutScaleBias::Stride::LongIndex ld_var,
typename LayoutScaleBias::Stride::LongIndex ld_mean,
typename LayoutScaleBias::Stride::LongIndex ld_gamma,
typename LayoutScaleBias::Stride::LongIndex ld_beta,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
int const *ptr_gather_A_indices = nullptr,
int const *ptr_gather_B_indices = nullptr,
int const *ptr_scatter_D_indices = nullptr)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
ptr_var(ptr_var), ptr_mean(ptr_mean),
ptr_gamma(ptr_gamma), ptr_beta(ptr_beta),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
batch_stride_var(batch_stride_var), batch_stride_mean(batch_stride_mean),
batch_stride_gamma(batch_stride_gamma), batch_stride_beta(batch_stride_beta),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd),
ld_var(ld_var), ld_mean(ld_mean),
ld_gamma(ld_gamma), ld_beta(ld_beta),
ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
ptr_scatter_D_indices(ptr_scatter_D_indices)
{
stride_a = make_Coord(lda);
stride_b = make_Coord(ldb);
stride_c = make_Coord(ldc);
stride_d = make_Coord(ldd);
stride_var = make_Coord(ld_var);
stride_mean = make_Coord(ld_mean);
stride_gamma = make_Coord(ld_gamma);
stride_beta = make_Coord(ld_beta);
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.stride_a, args.stride_b);
std::swap(args.batch_stride_A, args.batch_stride_B);
std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_var;
void * ptr_mean;
void * ptr_gamma;
void * ptr_beta;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_var;
int64_t batch_stride_mean;
int64_t batch_stride_gamma;
int64_t batch_stride_beta;
int64_t batch_stride_C;
int * ptr_gather_A_indices;
int * ptr_gather_B_indices;
int * ptr_scatter_D_indices;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_var(const_cast<void *>(args.ptr_var)),
ptr_mean(const_cast<void *>(args.ptr_mean)),
ptr_gamma(const_cast<void *>(args.ptr_gamma)),
ptr_beta(const_cast<void *>(args.ptr_beta)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_var(args.batch_stride_var),
batch_stride_mean(args.batch_stride_mean),
batch_stride_gamma(args.batch_stride_gamma),
batch_stride_beta(args.batch_stride_beta),
batch_stride_C(args.batch_stride_C),
ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)),
ptr_scatter_D_indices(const_cast<int *>(args.ptr_scatter_D_indices))
{}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_var = const_cast<void *>(args.ptr_var);
ptr_mean = const_cast<void *>(args.ptr_mean);
ptr_gamma = const_cast<void *>(args.ptr_gamma);
ptr_beta = const_cast<void *>(args.ptr_beta);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
ptr_scatter_D_indices = const_cast<int *>(args.ptr_scatter_D_indices);
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
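  // Worked example (illustrative): with row-major half-precision A accessed 8 elements at a time
  // (kAlignmentA == 8), a problem with problem_size.k() == 60 fails the check 60 % 8 == 0 and
  // can_implement() returns Status::kErrorMisalignedOperand for the A operand.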
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmLayernormMainloopFusion op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.ptr_gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.ptr_gather_B_indices);
// Construct iterators to A var/mean vector
typename Mma::IteratorVarMean iterator_var_mean(
params.problem_size.m(),
static_cast<ElementScaleBias const *>(params.ptr_var),
static_cast<ElementScaleBias const *>(params.ptr_mean),
thread_idx,
MatrixCoord(0, (threadblock_tile_offset.m() * Mma::Shape::kM))
);
// Construct iterators to A scale/bias vector
typename Mma::IteratorGammaBeta iterator_gamma_beta(
problem_size_k,
static_cast<ElementScaleBias const *>(params.ptr_gamma),
static_cast<ElementScaleBias const *>(params.ptr_beta),
thread_idx,
MatrixCoord(
0, (threadblock_tile_offset.k() * Mma::Shape::kK)
)
);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
iterator_var_mean,
iterator_gamma_beta,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.ptr_scatter_D_indices
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.ptr_scatter_D_indices
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
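// Illustrative host-side sketch (an assumption, not part of the original header): this kernel is
// normally driven through a device-level adapter, but a minimal manual launch would look roughly
// like the following, where `MyKernel` is a concrete GemmLayernormMainloopFusion instantiation and
// `args`, `device_sms`, and `sm_occupancy` are hypothetical host-side values:
//
//   typename MyKernel::Params params(args, device_sms, sm_occupancy);
//   dim3 grid = params.get_grid_dims();
//   dim3 block(MyKernel::kThreadCount, 1, 1);
//   int smem_bytes = int(sizeof(typename MyKernel::SharedStorage));
//   cutlass::Kernel<MyKernel><<<grid, block, smem_bytes>>>(params);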
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 27,281 | C | 34.066838 | 120 | 0.631648 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_grouped.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/layout/permute.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
/// Operation performed by GEMM
typename Operator = typename device::DefaultGemmConfiguration<
OperatorClass, ArchTag, ElementA_, ElementB_, ElementC_,
ElementAccumulator>::Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Permute result D
typename PermuteDLayout = layout::NoPermute,
///
typename Enable = void
>
struct DefaultGemmGrouped;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued GEMM kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Permute result D
typename PermuteDLayout
>
struct DefaultGemmGrouped<
ElementA,
LayoutA,
ComplexTransform::kNone, // transform A
kAlignmentA,
ElementB,
LayoutB,
ComplexTransform::kNone, // transform B
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
GroupScheduleMode_,
Operator,
SharedMemoryClear,
PermuteDLayout,
typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Mma operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::MapArguments<
ElementA,
LayoutA,
ComplexTransform::kNone,
kAlignmentA,
ElementB,
LayoutB,
ComplexTransform::kNone,
kAlignmentB,
LayoutC,
kInternalTranspose
>;
// Define the default GEMM kernel
using DefaultGemmKernel = typename kernel::DefaultGemm<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
MapArguments::kAlignmentA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
MapArguments::kAlignmentB,
ElementC,
typename MapArguments::LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
true,
Operator,
SharedMemoryClear,
false, /*GatherA*/
false, /*GatherB*/
false, /*ScatterD*/
PermuteDLayout
>::GemmKernel;
/// Define the kernel in terms of the default kernel
using GemmKernel = kernel::GemmGrouped<
typename DefaultGemmKernel::Mma,
typename DefaultGemmKernel::Epilogue,
ThreadblockSwizzle,
GroupScheduleMode_,
kInternalTranspose
>;
};
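// Illustrative instantiation (an assumption, not part of the original header; the types, alignments,
// and tile shapes are hypothetical and shown only to make the parameter ordering concrete):
//
// using GroupedGemmKernel = cutlass::gemm::kernel::DefaultGemmGrouped<
//     cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 8,
//     cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 8,
//     cutlass::half_t, cutlass::layout::ColumnMajor,
//     float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 32>,
//     cutlass::gemm::GemmShape<64, 64, 32>,
//     cutlass::gemm::GemmShape<16, 8, 16>,
//     cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//     cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,
//     4, GroupScheduleMode::kDeviceOnly>::GemmKernel;
//
// The resulting GemmKernel is what a device-level grouped GEMM (e.g. device::GemmGrouped) runs for
// each problem in the group.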
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued GEMM kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear
>
struct DefaultGemmGrouped<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
GroupScheduleMode_,
Operator,
SharedMemoryClear,
layout::NoPermute, /*PermuteDLayout*/
typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Mma operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::MapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
kInternalTranspose
>;
using DefaultGemmKernel = typename kernel::DefaultGemmComplex<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
ElementC,
typename MapArguments::LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MapArguments::kTransformA,
MapArguments::kTransformB,
Operator,
false
>::GemmKernel;
/// Define the kernel in terms of the default kernel
using GemmKernel = kernel::GemmGrouped<
typename DefaultGemmKernel::Mma,
typename DefaultGemmKernel::Epilogue,
ThreadblockSwizzle,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 12,385 | C | 31.171428 | 100 | 0.678805 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_rank_k.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level RankK definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/rank_k_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric>
struct DefaultRankK;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultRankK<
ElementA, LayoutA, kAlignmentA,
ElementC,layout::RowMajor, FillModeC,
ElementAccumulator, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
/// Define the threadblock-scoped matrix multiply-accumulate (A x AT)
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA,
kAlignmentA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
kAlignmentA,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue;
  /// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for C and D matrix operands
typename ElementC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultRankK<
ElementA, LayoutA, kAlignmentA,
ElementC,layout::RowMajor, FillModeC,
ElementAccumulator, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
/// Define the threadblock-scoped matrix multiply-accumulate (A x AT)
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA,
kAlignmentA,
ElementA, typename layout::LayoutTranspose<LayoutA>::type,
kAlignmentA,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue;
  /// Define the kernel-level RankK operator.
using RankKkernel = kernel::RankKUniversal<Mma, Epilogue, ThreadblockSwizzle, FillModeC>;
};
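// Illustrative note (an assumption, not part of the original header): because the Mma above pairs
// LayoutA with layout::LayoutTranspose<LayoutA>::type for the second operand, the kernel computes a
// SYRK-style rank-k update C = alpha * A * A^T + beta * C from a single input operand. A
// hypothetical FP64 instantiation, with tile shapes chosen only for illustration:
//
// using RankKUpdateKernel = cutlass::gemm::kernel::DefaultRankK<
//     double, cutlass::layout::ColumnMajor, 1,                      // ElementA, LayoutA, kAlignmentA
//     double, cutlass::layout::RowMajor, cutlass::FillMode::kLower, // ElementC, LayoutC, FillModeC
//     double, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<64, 64, 16>,
//     cutlass::gemm::GemmShape<32, 32, 16>,
//     cutlass::gemm::GemmShape<8, 8, 4>,
//     cutlass::epilogue::thread::LinearCombination<double, 1, double, double>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     4, false, cutlass::arch::OpMultiplyAdd>::RankKkernel;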
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 9,872 | C | 38.810484 | 100 | 0.675344 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_universal_streamk.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Universal GEMM kernel with a Stream-K threadblock mapping, in which threadblocks
    cooperatively cover output tiles and reduce partial accumulators through a device workspace.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/barrier.h"
#include "cutlass/block_striped.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock mapping function
>
struct GemmUniversalStreamk {
public:
//
// Types and constants
//
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
/// The per-thread tile of raw accumulators
using AccumulatorTile = typename Mma::FragmentC;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Workspace bytes per thread block
static size_t const kWorkspaceBytesPerBlock =
__NV_STD_MAX(
kThreadCount * sizeof(AccumulatorTile),
Epilogue::kWorkspaceBytesPerBlock);
/// Block-striped reduction utility
using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
typename LayoutA::Stride stride_a;
typename LayoutB::Stride stride_b;
typename LayoutC::Stride stride_c;
typename LayoutC::Stride stride_d;
typename LayoutA::Stride::LongIndex lda;
typename LayoutB::Stride::LongIndex ldb;
typename LayoutC::Stride::LongIndex ldc;
typename LayoutC::Stride::LongIndex ldd;
    int sm_limit;             /// Carveout override: the number of SMs that dispatch heuristics will attempt to load-balance across (-1 defaults to all SMs on the device)
//
// Methods
//
/// Default Constructor
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr),
sm_limit(-1)
{}
/// Constructor
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride stride_a,
typename LayoutB::Stride stride_b,
typename LayoutC::Stride stride_c,
typename LayoutC::Stride stride_d,
      int sm_limit = -1       /// Carveout override: the number of SMs that dispatch heuristics will attempt to load-balance across (-1 defaults to all SMs on the device)
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), sm_limit(sm_limit)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Constructor
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
      int sm_limit = -1       /// Carveout override: the number of SMs that dispatch heuristics will attempt to load-balance across (-1 defaults to all SMs on the device)
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), sm_limit(sm_limit)
{
stride_a = make_Coord(lda);
stride_b = make_Coord(ldb);
stride_c = make_Coord(ldc);
stride_d = make_Coord(ldd);
CUTLASS_TRACE_HOST("GemmUniversalStreamk::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const
{
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.stride_a, args.stride_b);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
/// Parameters structure
struct Params
{
public:
//
// Data members
//
ThreadblockSwizzle block_mapping;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
void *barrier_workspace;
void *partials_workspace;
protected:
//
// Host-only dispatch-utilities
//
/// Pad the given allocation size up to the nearest cache line
static size_t cacheline_align_up(size_t size)
{
static const int CACHELINE_SIZE = 128;
return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
}
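    // Worked example (illustrative): a request of 300 bytes rounds up to ((300 + 127) / 128) * 128
    // = 384 bytes, so workspace regions carved out back-to-back start on cache-line boundaries and
    // never share a 128-byte line.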
/// Get the workspace size needed for barrier
size_t get_barrier_workspace_size() const
{
// For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
// each reduction block needs its own synchronization flag.
int sk_blocks = block_mapping.sk_regions * block_mapping.sk_blocks_per_region;
int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
}
/// Get the workspace size needed for intermediate partial sums
size_t get_partials_workspace_size() const
{
int sk_blocks = block_mapping.sk_regions * block_mapping.sk_blocks_per_region;
return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
}
public:
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
output_op(args.epilogue),
mode(args.mode),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
barrier_workspace(nullptr),
partials_workspace(nullptr)
{
// Number of SMs to make available for StreamK decomposition
int avail_sms = (args.sm_limit == -1) ?
device_sms :
fast_min(args.sm_limit, device_sms);
// Initialize the block mapping structure
block_mapping = ThreadblockSwizzle(
typename ThreadblockSwizzle::template KernelTraits<GemmUniversalStreamk>(),
args.mode,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count,
sm_occupancy,
avail_sms);
}
/// Returns the workspace size (in bytes) needed for these parameters
size_t get_workspace_size() const
{
return
get_barrier_workspace_size() +
get_partials_workspace_size();
}
/// Assign and initialize the specified workspace buffer. Assumes
/// the memory allocated to workspace is at least as large as get_workspace_size().
Status init_workspace(
void *workspace,
cudaStream_t stream = nullptr)
{
uint8_t *ptr = static_cast<uint8_t*>(workspace);
// Establish partials workspace
partials_workspace = nullptr;
size_t partials_workspace_bytes = get_partials_workspace_size();
if (partials_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
partials_workspace = ptr;
ptr += partials_workspace_bytes;
}
// Establish barrier workspace
barrier_workspace = nullptr;
size_t barrier_workspace_bytes = get_barrier_workspace_size();
if (barrier_workspace_bytes > 0)
{
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
barrier_workspace = ptr;
ptr += barrier_workspace_bytes;
}
// Zero-initialize barrier workspace
if (barrier_workspace)
{
size_t barrier_workspace_bytes = get_barrier_workspace_size();
CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
cudaError_t result = cudaMemsetAsync(
barrier_workspace,
0,
barrier_workspace_bytes,
stream);
if (result != cudaSuccess) {
CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Returns the GEMM volume in thread block tiles
cutlass::gemm::GemmCoord get_tiled_shape() const
{
return block_mapping.tiled_shape;
}
/// Returns the total number of thread blocks to launch
int get_grid_blocks() const
{
dim3 grid_dims = get_grid_dims();
return grid_dims.x * grid_dims.y * grid_dims.z;
}
/// Returns the grid extents in thread blocks to launch
dim3 get_grid_dims() const
{
return block_mapping.get_grid_dims();
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()");
// Update input/output pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
batch_stride_C = args.batch_stride_C;
batch_stride_D = args.batch_stride_D;
output_op = args.epilogue;
}
};
/// Tile work descriptor
struct TileWorkDesc
{
/// The linear tile index
int tile_idx;
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
cutlass::gemm::GemmCoord tiled_coord;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
int iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_begin;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
int k_end;
/// The number of remaining MAC-iterations this threadblock will perform for this tile
int k_iters_remaining;
// Whether this block will perform the first iteration of this tile
CUTLASS_DEVICE
bool tile_started()
{
return (k_begin == 0);
}
// Whether this block will perform the last iteration of this tile
CUTLASS_DEVICE
bool tile_finished(Params const ¶ms)
{
return (k_end == params.block_mapping.problem_size.k());
}
};
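  // Sketch of the invariants above: data-parallel tiles have k_begin == 0 and
  // k_end == problem_size.k(); stream-K tiles clamp these to the block's share, and
  // k_iters_remaining * Mma::Shape::kK always covers at least the [k_begin, k_end) range.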
/// Shared memory storage structure
union SharedStorage
{
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
protected:
//
// Data members
//
/// GEMM problem parameters
Params const ¶ms;
/// Shared storage reference
SharedStorage &shared_storage;
/// ID within the threadblock
int thread_idx;
/// ID of warp
int warp_idx;
/// ID of each thread within a warp
int lane_idx;
/// Block index
int block_idx;
/// Threadblock scoped epilogue
Epilogue epilogue;
public:
//
// Host-only dispatch API
//
/// Determines whether the GEMM problem size satisfies this kernel's
/// alignment requirements
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size)
{
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
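  // Example (assuming 128-bit vectorized accesses on half_t operands, so
  // AccessType::kElements == 8): a row-major A requires problem_size.k() % 8 == 0.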
/// Determines whether the GEMM problem satisfies this kernel's
/// alignment requirements
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
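  // Host-side dispatch sketch (illustrative):
  //
  //   Params params(args, device_sms, sm_occupancy);
  //   size_t workspace_bytes = params.get_workspace_size();
  //   // ... allocate at least workspace_bytes of device memory ...
  //   params.init_workspace(workspace_ptr, stream);
  //   dim3 grid = params.get_grid_dims();
  //   // launch this kernel with `grid` blocks of kThreadCount threads and
  //   // sizeof(SharedStorage) bytes of shared memory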
protected:
//
// Device-only utility methods
//
/// Iterator for fetching tile fragments from A
CUTLASS_DEVICE
typename Mma::IteratorA init_iterator_A(TileWorkDesc &tile_work)
{
// The input A matrix
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
// Update input pointers based on batched/array mode
if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
}
int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
int m_end = params.block_mapping.problem_size.m();
return Mma::IteratorA(
params.params_A,
ptr_A,
{ m_end, tile_work.k_end },
threadIdx.x,
{ m_begin, tile_work.k_begin });
}
/// Iterator for fetching tile fragments from B
CUTLASS_DEVICE
typename Mma::IteratorB init_iterator_B(TileWorkDesc &tile_work)
{
// The input B matrix
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
// Update input pointers based on batched/array mode
if (params.mode == GemmUniversalMode::kBatched) {
ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
}
int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
int n_end = params.block_mapping.problem_size.n();
return Mma::IteratorB(
params.params_B,
ptr_B,
{ tile_work.k_end, n_end },
threadIdx.x,
{ tile_work.k_begin, n_begin });
}
CUTLASS_DEVICE
void init_dp_tile_work(
TileWorkDesc &tile_work,
int tile_idx)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile;
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = params.block_mapping.iters_per_tile;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = 0;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = params.block_mapping.problem_size.k();
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
CUTLASS_DEVICE
void init_sk_tile_work(
TileWorkDesc &tile_work,
int tile_idx,
int block_iter_begin,
int block_iter_end)
{
// The linear tile index
tile_work.tile_idx = tile_idx;
// The first global-scoped MAC-iteration for this tile
int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile;
// The first global-scoped MAC-iteration this threadblock will perform for this tile
tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);
// The first tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_begin = tile_work.iter_begin - tile_iter_begin;
// The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
int k_iter_end = block_iter_end - tile_iter_begin;
// The number of MAC-iterations this threadblock will perform for this tile
tile_work.k_iters_remaining = k_iter_end - k_iter_begin;
// The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_begin = k_iter_begin * Mma::Shape::kK;
// The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
tile_work.k_end = min(
params.block_mapping.problem_size.k(), // extent of k domain
(k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment
// The location of this tile (in threadblock-tile coordinates) in the output matrix
tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
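  // Worked example (illustrative numbers): with iters_per_tile == 8, Mma::Shape::kK == 32,
  // problem k() == 256, tile_idx == 3, and a block covering global iterations [26, 30):
  //   tile_iter_begin == 24, iter_begin == 26, k_iter_begin == 2, k_iter_end == 6,
  //   k_iters_remaining == 4, k_begin == 64, k_end == min(256, 192) == 192.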
/// Share accumulators with peers
CUTLASS_DEVICE
void share_accumulators(AccumulatorTile const &accumulator_tile, int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
int accum_tile_offset = first_block_idx * kThreadCount;
if (block_idx == first_block_idx)
{
// First peer initializes the workspace partials
BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
else
{
// Subsequent peers atomically accumulate into the workspace partials
if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
{
// Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
}
else
{
// Turnstile reduction order: wait until the previous peer has written
int wait_count = block_idx - first_block_idx;
Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
}
// Perform reduction in workspace
BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
}
// Signal our arrival
Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
}
/// Acquire accumulators from peers
CUTLASS_DEVICE
void acquire_accumulators(
AccumulatorTile &accumulator_tile,
int first_block_idx)
{
AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
// Wait for arrival
int num_carry_in = block_idx - first_block_idx;
Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);
// Load and add peer-partials accumulator tile to local accumulator tile
int accum_tile_offset = first_block_idx * kThreadCount;
BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
}
/// Perform epilogue computations and output
CUTLASS_DEVICE
void do_epilogue(
TileWorkDesc &tile_work,
AccumulatorTile &accumulator_tile)
{
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
// Update pointers for batched/array mode(s)
if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C;
ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tile_work.tiled_coord.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()];
}
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tile_work.tiled_coord.m() * Mma::Shape::kM,
tile_work.tiled_coord.n() * Mma::Shape::kN
);
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Execute the epilogue operator to update the destination tensor.
epilogue(
EpilogueOutputOp(params.output_op),
iterator_D,
accumulator_tile,
iterator_C);
}
CUTLASS_DEVICE
void separate_reduction(int reduce_idx)
{
int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;
// Reduce by sk-tile (every tile contributed to by one or more blocks)
reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;
int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile;
int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile - 1;
peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);
// Wait for peers to complete
int peer_idx_end = peer_idx_last + 1;
int num_peers = peer_idx_end - peer_idx_begin;
Barrier::wait_eq_reset(
params.barrier_workspace,
thread_idx,
(reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
num_peers);
/// The location of this tile (in threadblock-tile coordinates) in the output matrix
GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);
// Location of this tile in item-coords
MatrixCoord threadblock_item_begin(
tiled_coord.m() * Mma::Shape::kM,
tiled_coord.n() * Mma::Shape::kN
);
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
// Update pointers for batched/array mode(s)
if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += tiled_coord.k() * params.batch_stride_C;
ptr_D += tiled_coord.k() * params.batch_stride_D;
}
if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tiled_coord.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tiled_coord.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.block_mapping.problem_size.mn(),
thread_idx,
threadblock_item_begin);
// Execute the epilogue operator to update the destination tensor.
epilogue.reduce(
peer_idx_begin,
peer_idx_end,
reduce_fragment_idx,
params.partials_workspace,
EpilogueOutputOp(params.output_op),
iterator_D,
iterator_C);
}
CUTLASS_DEVICE
void process_tile(
TileWorkDesc tile_work,
int dp_start_block_idx,
int block_iter_begin)
{
// Initialize input iterators
typename Mma::IteratorA iterator_A = init_iterator_A(tile_work);
typename Mma::IteratorB iterator_B = init_iterator_B(tile_work);
// Initialize accumulators
AccumulatorTile accumulator_tile;
accumulator_tile.clear();
// Perform this tile's range of multiply-accumulate (MAC) iterations
Mma mma(
shared_storage.main_loop,
thread_idx,
warp_idx,
lane_idx);
mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);
if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
(params.block_mapping.reduction_blocks == 0) ||
(block_idx >= dp_start_block_idx))
{
//
// Cooperative SK peer reduction or DP block
//
int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);
if (!tile_work.tile_finished(params)) {
// Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
share_accumulators(accumulator_tile, first_block_idx);
}
else
{
// DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
if (!tile_work.tile_started())
{
// A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
acquire_accumulators(accumulator_tile, first_block_idx);
}
do_epilogue(tile_work, accumulator_tile);
}
}
else
{
//
// Separate peer reduction
//
// Share accumulator partial sums with peer threadblock(s) through scratch workspace
epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());
// Signal arrival
Barrier::arrive_range_inc(
params.barrier_workspace,
thread_idx,
tile_work.tile_idx * Epilogue::kAccumulatorFragments,
Epilogue::kAccumulatorFragments);
}
}
/// Executes one GEMM
CUTLASS_DEVICE
void gemm()
{
// Initialize block's iteration range
int tile_idx, block_iter_begin, block_iters_remaining;
int sk_padding_start_block_idx = params.block_mapping.sk_regions * params.block_mapping.sk_blocks_per_region;
int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;
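    // Block-index partitioning implied by the tests below:
    //   [0, sk_padding_start_block_idx)                        : SK blocks
    //   [sk_padding_start_block_idx, dp_start_block_idx)       : filler blocks (SK wave padding)
    //   [dp_start_block_idx, reduce_start_block_idx)           : DP blocks
    //   [reduce_start_block_idx, grid_padding_start_block_idx) : reduction blocks (kMixed strategy only)
    //   [grid_padding_start_block_idx, grid size)              : filler blocks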
if (block_idx < sk_padding_start_block_idx)
{
// This is a SK block
int block_iter_end;
params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
block_iters_remaining = block_iter_end - block_iter_begin;
tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
}
else if (block_idx < dp_start_block_idx)
{
// This is a filler block
return;
}
else if (block_idx < reduce_start_block_idx)
{
// This is a DP block
int dp_block_idx = block_idx - dp_start_block_idx;
int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;
      // Blocks in the first DP wave get the configured number of tiles
tile_idx = first_dp_tile + dp_block_idx;
int tile_allottment = params.block_mapping.dp_first_wave_tiles;
// Blocks in subsequent DP waves get 1 tile
if (dp_block_idx >= params.block_mapping.avail_sms) {
tile_allottment = 1;
tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
}
block_iter_begin = 0;
block_iters_remaining = params.block_mapping.iters_per_tile * tile_allottment;
}
else if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed) &&
(block_idx < grid_padding_start_block_idx))
{
// This is a reduction threadblock
int reduce_block_idx = block_idx - reduce_start_block_idx;
separate_reduction(reduce_block_idx);
return;
}
else
{
// This is a filler block
return;
}
// Iteration-processing loop body
CUTLASS_PRAGMA_NO_UNROLL
while (true)
{
// Initialize tile work descriptor
TileWorkDesc tile_work;
if (block_idx >= dp_start_block_idx)
{
init_dp_tile_work(tile_work, tile_idx);
// DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
if ((tile_idx < params.block_mapping.sk_tiles) ||
(tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape.m()) ||
(tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape.n()))
{
break;
}
}
else
{
init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
}
// Perform this block's share of work for this tile
process_tile(tile_work, dp_start_block_idx, block_iter_begin);
// Update remaining work for this block
block_iters_remaining -= tile_work.k_iters_remaining;
if (block_iters_remaining == 0) {
// Done
break;
}
// Continue to next tile
__syncthreads();
if (block_idx >= dp_start_block_idx)
{
        // DP blocks consume their tiles at stride
tile_idx += params.block_mapping.avail_sms;
}
else
{
// SK blocks consume their tiles in backwards order
tile_idx--;
}
}
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmUniversalStreamk op(params, shared_storage);
op();
}
// Constructor
CUTLASS_DEVICE
GemmUniversalStreamk(
Params const ¶ms,
SharedStorage &shared_storage)
:
params(params),
shared_storage(shared_storage),
thread_idx(threadIdx.x),
    warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)),   // broadcast the warp_id computed by lane 0 to ensure dependent code is compiled as warp-uniform
lane_idx(threadIdx.x % 32),
block_idx(params.block_mapping.get_block_idx()),
epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx)
{}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()()
{
// Do the GEMM
gemm();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 38,687 | C | 32.151671 | 166 | 0.638432 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_pipelined.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Mma, typename Epilogue, typename ThreadblockSwizzle>
__global__ void GemmPipelined(
cutlass::gemm::GemmCoord problem_size,
cutlass::gemm::GemmCoord grid_tiled_shape,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::Params params_epilogue
) {
// Shared storage needed by threadblock-scoped matrix multiply-accumulate
__shared__ union {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
} shared_storage;
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
int swizzle_log_tile = ThreadblockSwizzle().get_log_tile(grid_tiled_shape);
cutlass::gemm::GemmCoord tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile);
if (grid_tiled_shape.m() <= tb_tile_offset.m() ||
grid_tiled_shape.n() <= tb_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k()
};
cutlass::MatrixCoord tb_offset_B{
tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int tb_thread_id = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params_A,
ref_A.data(),
{problem_size.m(), problem_size.k()},
tb_thread_id,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params_B,
ref_B.data(),
{problem_size.k(), problem_size.n()},
tb_thread_id,
tb_offset_B);
int warp_id = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_id = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, tb_thread_id, warp_id, lane_id);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(problem_size, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
Epilogue epilogue(
params_epilogue,
shared_storage.epilogue,
tb_thread_id,
warp_id,
lane_id);
tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.n() * Mma::Shape::kN
);
// run efficient epilogue
epilogue({problem_size.m(), problem_size.n()}, accumulators, threadblock_offset);
}
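// Host-side launch sketch (illustrative; the grid/block computation is an assumption
// based on an identity swizzle):
//
//   ThreadblockSwizzle swizzle;
//   cutlass::gemm::GemmCoord grid_tiled_shape = swizzle.get_tiled_shape(
//       problem_size, {Mma::Shape::kM, Mma::Shape::kN, Mma::Shape::kK}, /*split_k_slices=*/1);
//   dim3 grid = swizzle.get_grid_shape(grid_tiled_shape);
//   dim3 block(32 * Mma::WarpCount::kCount, 1, 1);
//   GemmPipelined<Mma, Epilogue, ThreadblockSwizzle><<<grid, block>>>(
//       problem_size, grid_tiled_shape, params_A, ref_A, params_B, ref_B, params_epilogue);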
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 5,165 | C | 31.490566 | 100 | 0.650726 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_symm_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/symm_universal.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_multistage_trmm_complex.h"
#include "cutlass/gemm/threadblock/default_multistage_mma_complex.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A (kLeft or kRight)
SideMode kSideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode kFillModeA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric>
struct DefaultSymmComplex;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture complex datatype (symmetric)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode kSideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode kFillModeA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultSymmComplex<
ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
Operator, SplitKSerial, BlasMode::kSymmetric> {
static BlasMode const kBlasMode = BlasMode::kSymmetric;
  // Complex transforms don't apply to A or B for SYMM
static ComplexTransform const TransformA = ComplexTransform::kNone;
static ComplexTransform const TransformB = ComplexTransform::kNone;
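  // Decomposition sketch: with A stored in the kFillModeA triangle, write A = T + T'^T,
  // where T includes the diagonal and T' excludes it. Mma1 computes alpha * T * B
  // (or alpha * B * T for kRight); Mma2 contributes the alpha * T'^T * B (or alpha * B * T'^T) term.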
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - with diagonal: alpha * A * B or alpha * B * A
static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
kSideModeA, kFillModeA, kDiagTypeMma1,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
Stages, TransformA, TransformB, Operator>::ThreadblockMma;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT
static const DiagType kDiagTypeMma2 = DiagType::kZero;
using LayoutAMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
typename layout::LayoutTranspose<LayoutA>::type,
LayoutA
>::type;
using LayoutBMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
LayoutB,
typename layout::LayoutTranspose<LayoutB>::type
>::type;
using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutAMma2,
ElementB, LayoutBMma2,
kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
Stages, TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator>::Epilogue;
/// Define the kernel-level Symm operator.
using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture complex datatype (hermitian)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode kSideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode kFillModeA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultSymmComplex<
ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
Operator, SplitKSerial, BlasMode::kHermitian> {
static BlasMode const kBlasMode = BlasMode::kHermitian;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - with diagonal: alpha * A * B or alpha * B * A
static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
static ComplexTransform const TransformAMma1 = ComplexTransform::kNone;
static ComplexTransform const TransformBMma1 = ComplexTransform::kNone;
using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
kSideModeA, kFillModeA, kDiagTypeMma1,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
Stages, TransformAMma1, TransformBMma1, Operator, BlasMode::kHermitian>::ThreadblockMma;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - withOUT diagonal - with conjugate transpose: alpha * AT * B or alpha * B * AT
static const DiagType kDiagTypeMma2 = DiagType::kZero;
using LayoutAMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
typename layout::LayoutTranspose<LayoutA>::type,
LayoutA
>::type;
using LayoutBMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
LayoutB,
typename layout::LayoutTranspose<LayoutB>::type
>::type;
static ComplexTransform const TransformAMma2 = (kSideModeA == SideMode::kLeft) ?
ComplexTransform::kConjugate : ComplexTransform::kNone;
static ComplexTransform const TransformBMma2 = (kSideModeA == SideMode::kLeft) ?
ComplexTransform::kNone : ComplexTransform::kConjugate;
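  // For Hermitian A (A == conj(A)^T) the off-diagonal contribution is conj(T')^T, so the
  // transposed operand of Mma2 is conjugated (sketch of the intended decomposition).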
using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutAMma2,
ElementB, LayoutBMma2,
kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape,
Stages, TransformAMma2, TransformBMma2, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator>::Epilogue;
/// Define the kernel-level Symm operator.
using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture complex datatype (symmetric)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode kSideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode kFillModeA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultSymmComplex<
ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
Operator, SplitKSerial, BlasMode::kSymmetric> {
static BlasMode const kBlasMode = BlasMode::kSymmetric;
  // Complex transforms don't apply to A or B for SYMM
static ComplexTransform const TransformA = ComplexTransform::kNone;
static ComplexTransform const TransformB = ComplexTransform::kNone;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - with diagonal: alpha * A * B or alpha * B * A
static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
kSideModeA, kFillModeA, kDiagTypeMma1,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
Stages, TransformA, TransformB, Operator>::ThreadblockMma;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT
static const DiagType kDiagTypeMma2 = DiagType::kZero;
using LayoutAMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
typename layout::LayoutTranspose<LayoutA>::type,
LayoutA
>::type;
using LayoutBMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
LayoutB,
typename layout::LayoutTranspose<LayoutB>::type
>::type;
using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutAMma2,
ElementB, LayoutBMma2,
kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
Stages, TransformA, TransformB, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator>::Epilogue;
/// Define the kernel-level Symm operator.
using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture complex datatype (hermitian)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode kSideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode kFillModeA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial>
struct DefaultSymmComplex<
ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages,
Operator, SplitKSerial, BlasMode::kHermitian> {
static BlasMode const kBlasMode = BlasMode::kHermitian;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - with diagonal: alpha * A * B or alpha * B * A
static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
static ComplexTransform const TransformAMma1 = ComplexTransform::kNone;
static ComplexTransform const TransformBMma1 = ComplexTransform::kNone;
using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutA,
ElementB, LayoutB,
kSideModeA, kFillModeA, kDiagTypeMma1,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
Stages, TransformAMma1, TransformBMma1, Operator, BlasMode::kHermitian>::ThreadblockMma;
  /// Define the threadblock-scoped triangular matrix multiply-accumulate
/// TRMM - withOUT diagonal - with conjugate transpose: alpha * AT * B or alpha * B * AT
static const DiagType kDiagTypeMma2 = DiagType::kZero;
using LayoutAMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
typename layout::LayoutTranspose<LayoutA>::type,
LayoutA
>::type;
using LayoutBMma2 = typename platform::conditional<
(kSideModeA == SideMode::kLeft),
LayoutB,
typename layout::LayoutTranspose<LayoutB>::type
>::type;
static ComplexTransform const TransformAMma2 = (kSideModeA == SideMode::kLeft) ?
ComplexTransform::kConjugate : ComplexTransform::kNone;
static ComplexTransform const TransformBMma2 = (kSideModeA == SideMode::kLeft) ?
ComplexTransform::kNone : ComplexTransform::kConjugate;
using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
ElementA, LayoutAMma2,
ElementB, LayoutBMma2,
kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape,
Stages, TransformAMma2, TransformBMma2, Operator>::ThreadblockMma;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
EpilogueOutputOp::kCount, Operator>::Epilogue;
/// Define the kernel-level Symm operator.
using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 21,830 | C | 41.88998 | 109 | 0.673752 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_grouped_problem_visitor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Scheduler for grouped GEMM
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Helper for correctly representing problem sizes in grouped kernels
template <
typename ThreadblockShape,
bool Transposed
>
struct GemmGroupedProblemSizeHelper {
static bool const kTransposed = Transposed;
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return cutlass::gemm::GemmCoord(
((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM),
((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN),
1);
}
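  // Example: a problem with (m, n) == (1000, 512) and a 128x128 threadblock tile maps to a
  // grid of ((1000 - 1 + 128) / 128, (512 - 1 + 128) / 128, 1) == (8, 4, 1) tiles.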
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {
if (kTransposed) {
swap(problem.m(), problem.n());
}
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
return grid.m() * grid.n();
}
};
} // namespace detail
/// Visitor class to abstract away the algorithm for iterating over tiles
template <typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount,
bool Transposed = false>
struct GemmGroupedProblemVisitor : public GroupedProblemVisitor<
detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transposed>,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount> {
static bool const kTransposed = Transposed;
using ProblemSizeHelper = detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transposed>;
using Base = GroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount>;
using Params = typename Base::Params;
using SharedStorage = typename Base::SharedStorage;
//
// Methods
//
CUTLASS_DEVICE
GemmGroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base (params_, shared_storage_, block_idx)
{}
};
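// Typical use inside a grouped GEMM kernel (illustrative sketch; method names follow the
// GroupedProblemVisitor base class):
//
//   ProblemVisitor problem_visitor(params.problem_visitor, shared_storage.problem_visitor, blockIdx.x);
//   while (problem_visitor.next_tile()) {
//     GemmCoord problem_size = problem_visitor.problem_size();
//     int32_t problem_idx = problem_visitor.problem_index();
//     int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
//     // ... map threadblock_idx to a tile offset, run the mainloop and epilogue ...
//     problem_visitor.advance(gridDim.x);
//   }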
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4,691 | C | 37.146341 | 126 | 0.617566 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/layout/permute.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Gather operand A by using an index array
bool GatherA = false,
/// Gather operand B by using an index array
bool GatherB = false,
/// Scatter result D by using an index array
bool ScatterD = false,
/// Permute result D
typename PermuteDLayout = layout::NoPermute,
///
typename Enable = void
>
struct DefaultGemm;
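// The partial specializations below select a threadblock-scoped Mma and a matching epilogue
// based on the operator class and target architecture. A minimal sketch of how this trait
// might be instantiated is shown here; the element types, tile shapes, epilogue and swizzle
// choices are illustrative assumptions only, not a canonical configuration:
//
//   using ExampleKernel = typename cutlass::gemm::kernel::DefaultGemm<
//       cutlass::half_t, cutlass::layout::RowMajor, 8,              // A
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,           // B
//       float, cutlass::layout::RowMajor,                           // C/D
//       float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,                     // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,                       // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,                        // instruction shape
//       cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3, /*SplitKSerial=*/false, cutlass::arch::OpMultiplyAdd>::GemmKernel;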
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Hopper Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout
>
struct DefaultGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator, SharedMemoryClear, GatherA, GatherB, ScatterD, PermuteDLayout> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator, false, SharedMemoryClear, GatherA, GatherB>::ThreadblockMma;
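// kPartitionsK counts warp-level partitions along the K dimension of the threadblock tile.
// For example (illustrative shapes): a 128x128x32 threadblock tile with 64x64x32 warp tiles
// gives kPartitionsK = 32 / 32 = 1, whereas a 64x64x64 threadblock tile with 32x32x32 warp
// tiles gives kPartitionsK = 2 and the epilogue combines the two partial accumulations.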
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount, ScatterD, PermuteDLayout>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout
>
struct DefaultGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
LayoutC, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator, SharedMemoryClear, GatherA, GatherB, ScatterD, PermuteDLayout> {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"Epilogue in the kernel level must be row major");
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator, false, SharedMemoryClear, GatherA, GatherB>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using RegularEpilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount, ScatterD, PermuteDLayout>::Epilogue;
using Affine2Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpAffineRankN<
2, ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
using Epilogue = typename platform::conditional<platform::is_same<LayoutC, layout::RowMajor>::value,
RegularEpilogue,
Affine2Epilogue>::type;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout
>
struct DefaultGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
SharedMemoryClear,
GatherA,
GatherB,
ScatterD,
PermuteDLayout
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape,
WarpShape,
InstructionShape,
2,
Operator,
false,
SharedMemoryClear,
GatherA,
GatherB
>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
ScatterD,
PermuteDLayout
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Number of Interleaved k
int InterleavedK,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear>
struct DefaultGemm<
ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA,
ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC,
layout::ColumnMajorInterleaved<InterleavedK>, int32_t,
arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape,
InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages,
SplitKSerial, Operator, SharedMemoryClear, false, false, false> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages, Operator,
true, SharedMemoryClear>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of Interleaved k
int InterleavedK,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear>
struct DefaultGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
kAlignmentA, ElementB,
layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
ElementC, layout::ColumnMajorInterleaved<InterleavedK>,
int32_t, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, 2, SplitKSerial, Operator, SharedMemoryClear,
false, false, false> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, true>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Volta architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout
>
struct DefaultGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm70,
ThreadblockShape,
WarpShape,
GemmShape<8, 8, 4>,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
SharedMemoryClear,
GatherA,
GatherB,
ScatterD,
PermuteDLayout
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm70,
ThreadblockShape,
WarpShape,
GemmShape<8, 8, 4>,
2,
Operator,
false,
SharedMemoryClear,
GatherA,
GatherB
>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
ScatterD,
PermuteDLayout
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout
>
struct DefaultGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
SplitKSerial,
Operator,
SharedMemoryClear,
GatherA,
GatherB,
ScatterD,
PermuteDLayout,
typename platform::enable_if< ! platform::is_same<ArchTag, arch::Sm80>::value >::type > {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"Epilogue in the kernel level must be row major");
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
arch::OpClassSimt,
arch::Sm50,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
2,
Operator,
false,
SharedMemoryClear,
GatherA,
GatherB>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using RegularEpilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess,
ScatterD,
PermuteDLayout
>::Epilogue;
using Affine2Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimtAffineRankN<
2,
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
using Epilogue = typename platform::conditional<platform::is_same<LayoutC, layout::RowMajor>::value,
RegularEpilogue,
Affine2Epilogue>::type;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages
int Stages,
/// If true, kernel is configured to support serial reduction in the epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout
>
struct DefaultGemm<ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
arch::Sm80,
ThreadblockShape,
WarpShape,
GemmShape<1, 1, 1>,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
SharedMemoryClear,
GatherA,
GatherB,
ScatterD,
PermuteDLayout> {
static_assert(platform::is_same<LayoutC, layout::RowMajor>::value
|| platform::is_same<LayoutC, layout::AffineRankN<2>>::value,
"Epilogue in the kernel level must be row major");
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassSimt, arch::Sm80,
ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, Stages,
Operator, false, SharedMemoryClear, GatherA, GatherB>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using RegularEpilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess,
ScatterD,
PermuteDLayout
>::Epilogue;
using Affine2Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimtAffineRankN<
2,
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
using Epilogue = typename platform::conditional<platform::is_same<LayoutC, layout::RowMajor>::value,
RegularEpilogue,
Affine2Epilogue>::type;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for SIMT DP4A
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Layout type for C matrix operand
typename LayoutC,
/// Element type for C and D matrix operands
typename ElementC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear
>
struct DefaultGemm<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt,
ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>,
EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial,
Operator, SharedMemoryClear, false, false, false> {
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using ElementB = int8_t;
using OperatorClass = arch::OpClassSimt;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
arch::OpClassSimt,
arch::Sm50,
ThreadblockShape,
WarpShape,
InstructionShape,
2,
Operator
>::ThreadblockMma;
static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
typename Mma::Operator,
EpilogueOutputOp,
kEpilogueElementsPerAccess
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Wmma Gemm Kernel
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear
>
struct DefaultGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, LayoutC,
ElementAccumulator,
arch::OpClassWmmaTensorOp,
ArchTag,
ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
SharedMemoryClear,
false,
false,
false
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWmmaTensorOp<
ThreadblockShape,
typename Mma::Operator,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 37,752 | C | 34.582469 | 102 | 0.678322 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/sparse_gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined sparse GEMM kernel. Batching is not computed by this kernel; split-K via serial reduction is supported when SplitKSerial is enabled.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct SparseGemm {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
static int const kSparse = Mma::kSparse;
static int const kMetaSizeInBits = Mma::kMetaSizeInBits;
static int const kMaxID2 = Mma::kMaxID2;
static int const kElementsPerElementE = Mma::kElementsPerElementE;
using ElementE = typename Mma::ElementE;
using LayoutE = typename Mma::LayoutE;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
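// One warp (32 threads) per entry of WarpCount. For example (illustrative shapes), a 128x128
// threadblock tile with 64x64 warp tiles yields WarpCount 2x2x1 = 4 warps, so kThreadCount = 128.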
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename Mma::IteratorE::Params params_E;
typename Mma::IteratorE::TensorRef ref_E;
typename OutputOp::Params output_op;
int *semaphore;
int gemm_k_iterations;
int gemm_k_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename Mma::IteratorE::TensorRef ref_E,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
params_E(ref_E.layout()),
ref_E(ref_E),
output_op(output_op) {
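// Partition the K extent across the split-K slices of the grid. Worked example (illustrative
// values): with problem_size.k() = 4096, Mma::Shape::kK = 64 and grid_tiled_shape.k() = 2,
// total_gemm_k_iterations = 64, each slice runs 32 mainloop iterations, and
// gemm_k_size = 32 * 64 = 2048 elements of K per slice.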
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
semaphore = workspace;
}
};
/// Shared memory storage structure
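/// The mainloop and the epilogue never execute concurrently within a threadblock, so their
/// shared-memory buffers are overlaid in a union to reduce the total shared-memory footprint.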
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
SparseGemm() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename Mma::IteratorE::TensorRef ref_E) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static int const kAlignmentE = Mma::IteratorE::AccessType::kElements;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_E, kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) ||
(problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
// The k dimension has to be a multiple of the threadblock K (Mma::Shape::kK) because
// out-of-bound metadata would be zero-filled by cp.async with zfill, but zero is not
// a valid metadata encoding.
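// For example (illustrative), with Mma::Shape::kK = 64 a problem with k = 1000 is rejected
// here, while k = 1024 passes this check.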
if (problem_size.k() % Mma::Shape::kK) {
return Status::kErrorMisalignedOperand;
}
// M dimension has to be multiple of 32 (sparse float) or 16 (sparse int)
// because of the row reordering of operand E
static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16;
if (problem_size.m() % kAlignmentM) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
cutlass::MatrixCoord tb_offset_E{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A, B, and E operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k / kSparse},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorE iterator_E(
params.params_E, params.ref_E.data(),
{params.problem_size.m(),
problem_size_k / kSparse / kElementsPerElementE},
thread_idx, tb_offset_E);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (!kSplitKSerial || gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators);
}
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
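// Worked example (illustrative): with grid_tiled_shape.k() = 4, slices 0, 1 and 2 release
// locks 1, 2 and 3 respectively so the next slice may proceed, while the final slice
// (k() == 3) resets the lock to 0 for subsequent grids.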
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
__threadfence();
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 13,610 | C | 32.942643 | 108 | 0.647465 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/symm_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel-level SYMM definition: a universal kernel that composes two threadblock-scoped
triangular matrix multiply-accumulate mainloops (Mma1 and Mma2) with a shared epilogue.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped triangular matrix multiply-accumulate (A*B or B*A)
typename Mma2_, ///! Threadblock-scoped triangular matrix multiply-accumulate (AT*B or B*AT)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight)
FillMode FillMode_ ///! Fill Mode for triangular matrix (kLower or kUpper)
>
struct SymmUniversal {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma1::IteratorA::Element;
using ElementB = typename Mma1::IteratorB::Element;
// Mma1 (TRMM - with diagonal: C_tmp = alpha * A * B)
using LayoutA = typename Mma1::IteratorA::Layout;
using LayoutBT = typename Mma1::IteratorB::Layout;
static ComplexTransform const kMma1TransformA = Mma1::kTransformA;
static ComplexTransform const kMma1TransformB = Mma1::kTransformB;
// Mma2 (TRMM - withOUT diagonal: alpha * AT * B)
using LayoutB = typename Mma2::IteratorA::Layout;
using LayoutAT = typename Mma2::IteratorB::Layout;
static ComplexTransform const kMma2TransformA = Mma2::kTransformA;
static ComplexTransform const kMma2TransformB = Mma2::kTransformB;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
// Output related typedefinitions
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static SideMode const kSideModeA = SideMode_;
static FillMode const kFillModeA = FillMode_;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1),
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr) { }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd
):
mode(mode),
problem_size(problem_size),
batch_count(batch_count),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) {
}
/// Returns arguments for the transposed problem sizes
Arguments transposed_problem_size() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
return args;
}
/// Returns arguments for the transposed matrices
Arguments swapped_matrices() const {
Arguments args(*this);
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
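// A minimal sketch of constructing Arguments for a single, non-batched problem, assuming a
// linear-combination style epilogue whose Params is {alpha, beta}; the pointer, extent and
// leading-dimension names below are placeholders, not values defined in this file:
//
//   Arguments args(
//       GemmUniversalMode::kGemm, {m, n, k}, /*batch_count=*/1,
//       {alpha, beta},
//       ptr_A, ptr_B, ptr_C, ptr_D,
//       /*batch_stride_A,B,C,D=*/0, 0, 0, 0,
//       lda, ldb, ldc, ldd);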
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
// Mma1 Iterator A and B params
typename Mma1::IteratorA::Params params_A_mma1;
typename Mma1::IteratorB::Params params_B_mma1;
// Mma2 Iterator A and B params
typename Mma2::IteratorA::Params params_A_mma2;
typename Mma2::IteratorB::Params params_B_mma2;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A_mma1(0),
params_B_mma1(0),
params_A_mma2(0),
params_B_mma2(0),
params_C(0),
params_D(0),
mode(cutlass::gemm::GemmUniversalMode::kGemm),
batch_count(0),
gemm_k_size(0),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
batch_stride_A(0),
batch_stride_B(0),
batch_stride_C(0),
batch_stride_D(0),
semaphore(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
int gemm_k_size,
void *workspace = nullptr
):
problem_size(args.problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A_mma1(args.lda),
params_B_mma1(args.ldb),
params_A_mma2(args.lda),
params_B_mma2(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
output_op(args.epilogue),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(gemm_k_size),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(const_cast<void *>(args.ptr_D)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
semaphore(static_cast<int *>(workspace)) {
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr) {
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
output_op = args.epilogue;
semaphore = static_cast<int *>(workspace);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
SymmUniversal() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
/// Executes the two threadblock-scoped matrix multiplies that make up one SYMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK_mma1{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN_mma1{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
cutlass::MatrixCoord tb_offset_MxK_mma2{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN_mma2{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1
Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2
Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
int gemm_k_iterations_mma1 = gemm_k_iterations;
int gemm_k_iterations_mma2 = gemm_k_iterations;
/******************************************************************************************************
* SYMM (Side Mode, Fill Mode) is made of two TRMMs:
First TRMM (Mma1: Side Mode, Fill Mode, Non-Unit Diag): (A * B) or (B * A)
Second TRMM (Mma2: Side Mode, Inverted Fill Mode, Unit Diag): (AT * B) or (B * AT)
* For the first TRMM (Mma1) of SYMM, the following method is used to calculate the k-iterations:
First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other
- (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations
needed to process all elements till that coordinate.
- (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations
needed to process all elements till that coordinate.
Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other
- (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations
that can be skipped for all elements of this tile.
- (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations
that can be skipped for all elements of this tile.
* For the second TRMM (Mma2) of SYMM, the k-iterations and threadblock offsets are calculated
the same way as the first TRMM (Mma1) of same side mode but with inverted fill mode.
For example, if the first TRMM is left sided with lower fill, the second TRMM would be
left sided with upper fill.
********************************************************************************************************/
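// Worked example (tile sizes below are illustrative assumptions): for kLeft/kLower with
// Mma1::Shape = <128, 128, 32> and a CTA at tile row m = 1, Mma1 runs
// ((1 + 1) * 128 + 31) / 32 = 8 k-iterations (covering blocks up to and including the
// diagonal), while Mma2 skips (1 * 128) / 32 = 4 k-iterations and advances its A/B offsets
// by 4 * 32 = 128 elements along K before starting.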
if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kLower) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 != 0) {
tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kUpper) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 != 0) {
tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kUpper) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 != 0) {
tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
}
} else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kLower) {
int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma1 != 0) {
tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
}
int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
}
}
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A_mma1(
params.params_A_mma1,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK_mma1);
typename Mma1::IteratorB iterator_B_mma1(
params.params_B_mma1,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN_mma1);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_A_mma2(
params.params_A_mma2,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK_mma2);
typename Mma2::IteratorB iterator_B_mma2(
params.params_B_mma2,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_KxN_mma2);
// Compute threadblock-scoped matrix multiply-add (A x B) or (B x A)
mma1(
gemm_k_iterations_mma1,
accumulators,
iterator_A_mma1,
iterator_B_mma1,
accumulators);
// Compute threadblock-scoped matrix multiply-add (AT x B) or (B x AT)
mma2(
gemm_k_iterations_mma2,
accumulators,
iterator_A_mma2,
iterator_B_mma2,
accumulators);
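    // Note: 'accumulators' is passed as both the source and destination fragment in the two calls
    // above, so the second threadblock-scoped product accumulates on top of the first before the
    // epilogue runs.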
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 23,900 | C | 33.193133 | 138 | 0.628828 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
 \brief Universal GEMM kernel: accommodates serial reductions, parallel reductions, batched strided,
        and batched array variants.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmUniversal {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
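  /// e.g. (illustrative) half_t operands give max(128/16, 128/16) = 8 elements; half_t with float
  /// gives max(8, 4) = 8, so every split remains 128b-aligned for both operands.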
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
typename LayoutA::Stride stride_a;
typename LayoutB::Stride stride_b;
typename LayoutC::Stride stride_c;
typename LayoutC::Stride stride_d;
typename LayoutA::Stride::LongIndex lda;
typename LayoutB::Stride::LongIndex ldb;
typename LayoutC::Stride::LongIndex ldc;
typename LayoutC::Stride::LongIndex ldd;
int const * ptr_gather_A_indices;
int const * ptr_gather_B_indices;
int const * ptr_scatter_D_indices;
//
// Methods
//
Arguments():
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr),
ptr_gather_A_indices(nullptr),
ptr_gather_B_indices(nullptr),
ptr_scatter_D_indices(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride stride_a,
typename LayoutB::Stride stride_b,
typename LayoutC::Stride stride_c,
typename LayoutC::Stride stride_d,
int const *ptr_gather_A_indices = nullptr,
int const *ptr_gather_B_indices = nullptr,
int const *ptr_scatter_D_indices = nullptr)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d),
ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
ptr_scatter_D_indices(ptr_scatter_D_indices)
{
lda = 0;
ldb = 0;
ldc = 0;
ldd = 0;
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
int const *ptr_gather_A_indices = nullptr,
int const *ptr_gather_B_indices = nullptr,
int const *ptr_scatter_D_indices = nullptr
):
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd),
ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
ptr_scatter_D_indices(ptr_scatter_D_indices)
{
stride_a = make_Coord(lda);
stride_b = make_Coord(ldb);
stride_c = make_Coord(ldc);
stride_d = make_Coord(ldd);
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const
{
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.stride_a, args.stride_b);
std::swap(args.batch_stride_A, args.batch_stride_B);
std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices);
return args;
}
};
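  /// Illustrative usage sketch (added comment; 'Gemm' stands for a fully-specified device-level
  /// instantiation whose Arguments is this struct, M/N/K/alpha/beta/dA/dB/dC/dD/lda/ldb/ldc/ldd are
  /// placeholders, and a LinearCombination-style epilogue is assumed):
  ///
  ///   typename Gemm::Arguments args(
  ///     GemmUniversalMode::kGemm,            // mode
  ///     {M, N, K},                           // problem size
  ///     1,                                   // batch count / split-K slices
  ///     {alpha, beta},                       // epilogue parameters
  ///     dA, dB, dC, dD,                      // operand pointers
  ///     int64_t(M) * K, int64_t(K) * N,      // batch strides for A and B
  ///     int64_t(M) * N, int64_t(M) * N,      // batch strides for C and D
  ///     lda, ldb, ldc, ldd);                 // leading dimensions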
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int * ptr_gather_A_indices;
int * ptr_gather_B_indices;
int * ptr_scatter_D_indices;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)),
ptr_scatter_D_indices(const_cast<int *>(args.ptr_scatter_D_indices))
{}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
// Update input/output pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
ptr_scatter_D_indices = const_cast<int *>(args.ptr_scatter_D_indices);
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size)
{
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
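    // Each operand's contiguous dimension must be divisible by its access width determined below
    // (the vector length in elements, or 32/64 elements for interleaved layouts); otherwise the
    // kernel reports kErrorMisalignedOperand.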
static int const kAlignmentA = (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (platform::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (platform::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmUniversal op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(
Params const ¶ms,
SharedStorage &shared_storage)
{
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
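    // For kGemm and kGemmSplitKParallel, each threadblock along grid dimension k covers the K range
    // [k * gemm_k_size, (k + 1) * gemm_k_size), clamped to K for the final slice. For kBatched and
    // kArray, the A/B pointers are advanced (or selected) using the batch index carried in
    // threadblock_tile_offset.k().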
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.ptr_gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.ptr_gather_B_indices);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
    // Compute the number of threadblock-scoped mainloop iterations along the K dimension
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
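    // Ceiling division: e.g. (illustrative values) problem_size_k = 512, offset_k = 384,
    // Mma::Shape::kK = 32 gives (128 + 31) / 32 = 4 mainloop iterations.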
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
//assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.ptr_scatter_D_indices
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset,
params.ptr_scatter_D_indices
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 22,881 | C | 32.502196 | 120 | 0.623618 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_grouped_softmax_mainloop_fusion.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
    Default kernel-level grouped GEMM with softmax fused into the mainloop
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/threadblock/default_mma_softmax_mainloop_fusion.h"
#include "cutlass/layout/permute.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for Scale/Bias vectors
typename ElementScaleBias_,
/// Layout type for Scale/Bias vectors
typename LayoutScaleBias_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
/// Operation performed by GEMM
typename Operator = typename device::DefaultGemmConfiguration<
OperatorClass, ArchTag, ElementA_, ElementB_, ElementC_,
ElementAccumulator>::Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone
>
struct DefaultGemmGroupedSoftmaxMainloopFusion {
// If true, we must construct a 'transposed-and-exchanged' Mma operator.
static bool const kInternalTranspose = platform::is_same<LayoutC_, layout::ColumnMajor>::value;
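  // The epilogue writes row-major output, so a column-major LayoutC_ is handled by exchanging the
  // A and B operands and transposing their layouts (computing D^T = B^T * A^T) via MapArguments below.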
using MapArguments = kernel::detail::MapArguments<
ElementA_,
LayoutA_,
ComplexTransform::kNone,
kAlignmentA,
ElementB_,
LayoutB_,
ComplexTransform::kNone,
kAlignmentB,
LayoutC_,
kInternalTranspose
>;
private:
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMmaSoftmaxMainloopFusion<
typename MapArguments::ElementA, typename MapArguments::LayoutA, MapArguments::kAlignmentA,
typename MapArguments::ElementB, typename MapArguments::LayoutB, MapArguments::kAlignmentB,
ElementScaleBias_, LayoutScaleBias_, ElementAccumulator, layout::RowMajor, OperatorClass, ArchTag,
ThreadblockShape, WarpShape, InstructionShape, Stages, kInternalTranspose,
Operator, false, SharedMemoryClear>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
public:
using GemmKernel = kernel::GemmGroupedSoftmaxMainloopFusion<
Mma,
Epilogue,
ThreadblockSwizzle,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,592 | C | 38.957576 | 104 | 0.677336 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
struct GemmParams {
//
// Type definitions
//
using Index = int32_t;
using LongIndex = int64_t;
using MmaIteratorParams = typename cutlass::transform::threadblock::PredicatedTileAccessIteratorParams;
using EpilogueIteratorParams = typename cutlass::epilogue::threadblock::PredicatedTileIteratorParams;
//
// Data members
//
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
// Data members for Mma::Iterator::Params
MmaIteratorParams params_itr_a;
MmaIteratorParams params_itr_b;
// Data member for Epilogue::OutputTileIterator::Params
EpilogueIteratorParams params_itr_c;
EpilogueIteratorParams params_itr_d;
GemmUniversalMode mode;
int batch_count;
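  /// K extent assigned to each threadblock along the split-K axis (used by kernels to derive
  /// offset_k and problem_size_k)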
int gemm_k_size;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
LongIndex lda;
LongIndex ldb;
LongIndex ldc;
LongIndex ldd;
LongIndex batch_stride_A;
LongIndex batch_stride_B;
LongIndex batch_stride_C;
LongIndex batch_stride_D;
int *semaphore;
//
// Methods
//
CUTLASS_HOST_DEVICE
GemmParams() {}
CUTLASS_HOST_DEVICE
GemmParams(
cutlass::gemm::GemmCoord problem_size_,
cutlass::gemm::GemmCoord grid_tiled_shape_,
int swizzle_log_tile_,
GemmUniversalMode mode_,
int batch_count_,
int gemm_k_size_,
void const * ptr_A_,
void const * ptr_B_,
void const * ptr_C_,
void * ptr_D_,
LongIndex lda_,
LongIndex ldb_,
LongIndex ldc_,
LongIndex ldd_,
int64_t batch_stride_A_,
int64_t batch_stride_B_,
int64_t batch_stride_C_,
int64_t batch_stride_D_,
MmaIteratorParams const & params_itr_a_,
MmaIteratorParams const & params_itr_b_,
EpilogueIteratorParams const & params_itr_c_,
EpilogueIteratorParams const & params_itr_d_,
void *workspace_ = nullptr) :
problem_size(problem_size_),
grid_tiled_shape(grid_tiled_shape_),
swizzle_log_tile(swizzle_log_tile_),
mode(mode_),
batch_count(batch_count_),
gemm_k_size(gemm_k_size_),
ptr_A(const_cast<void *>(ptr_A_)),
ptr_B(const_cast<void *>(ptr_B_)),
ptr_C(const_cast<void *>(ptr_C_)),
ptr_D(ptr_D_),
lda(lda_),
ldb(ldb_),
ldc(ldc_),
ldd(ldd_),
batch_stride_A(batch_stride_A_),
batch_stride_B(batch_stride_B_),
batch_stride_C(batch_stride_C_),
batch_stride_D(batch_stride_D_),
params_itr_a(params_itr_a_),
params_itr_b(params_itr_b_),
params_itr_c(params_itr_c_),
params_itr_d(params_itr_d_),
    semaphore(static_cast<int *>(workspace_)) { }
CUTLASS_HOST_DEVICE
void update(
void const * ptr_A_,
void const * ptr_B_,
void const * ptr_C_,
void * ptr_D_,
int64_t batch_stride_A_,
int64_t batch_stride_B_,
int64_t batch_stride_C_,
int64_t batch_stride_D_,
void *workspace_ = nullptr) {
ptr_A = const_cast<void *>(ptr_A_);
ptr_B = const_cast<void *>(ptr_B_);
ptr_C = const_cast<void *>(ptr_C_);
ptr_D = ptr_D_;
batch_stride_A = batch_stride_A_;
batch_stride_B = batch_stride_B_;
batch_stride_C = batch_stride_C_;
batch_stride_D = batch_stride_D_;
semaphore = static_cast<int *>(workspace_);
CUTLASS_TRACE_HOST("GemmParams::update()");
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,144 | C | 29.725 | 107 | 0.620117 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_planar_complex_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/gemm_planar_complex.h"
#include "cutlass/gemm/kernel/gemm_planar_complex_array.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/epilogue/threadblock/default_epilogue_planar_complex.h"
#include "cutlass/gemm/threadblock/default_mma_planar_complex_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Math operation performed by GEMM (e.g. arch::OpMultiplyAdd)
typename Operator,
/// Conditional enabling to switch between stages
typename Enable = void
>
struct DefaultGemmPlanarComplexUniversal;
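/// Declared only; the partial specializations below select a pipelined mainloop for Stages <= 2 and a
/// multistage mainloop for Stages > 2 via the trailing enable_if parameter.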
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for pipelined mainloop
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator
>
struct DefaultGemmPlanarComplexUniversal<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
typename platform::enable_if<(Stages <= 2)>::type
> {
/// Define planar complex valued variants instead
using Mma = typename gemm::threadblock::DefaultMmaPlanarComplexPipelined<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
Stages,
TransformA,
TransformB,
Operator
>::ThreadblockMma;
/// Planar complex epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpiloguePlanarComplex<
ThreadblockShape,
typename Mma::Policy::Operator,
OperatorClass,
ArchTag,
ThreadblockShape::kK / WarpShape::kK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel in terms of the default kernel
using GemmKernel = kernel::GemmPlanarComplex<
Mma,
Epilogue,
ThreadblockSwizzle
>;
// Array variant
using GemmArrayKernel = kernel::GemmPlanarComplexArray<
Mma,
Epilogue,
ThreadblockSwizzle
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiple pipeline stages.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator
>
struct DefaultGemmPlanarComplexUniversal<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
typename platform::enable_if<(Stages > 2)>::type
> {
/// Define planar complex valued variants instead
using Mma = typename gemm::threadblock::DefaultMmaPlanarComplexMultistage<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
Stages,
TransformA,
TransformB,
Operator
>::ThreadblockMma;
/// Planar complex epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpiloguePlanarComplex<
ThreadblockShape,
typename Mma::Policy::Operator,
OperatorClass,
ArchTag,
ThreadblockShape::kK / WarpShape::kK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
/// Define the kernel in terms of the default kernel
using GemmKernel = kernel::GemmPlanarComplex<
Mma,
Epilogue,
ThreadblockSwizzle
>;
// Array variant
using GemmArrayKernel = kernel::GemmPlanarComplexArray<
Mma,
Epilogue,
ThreadblockSwizzle
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,104 | C | 30.458923 | 100 | 0.673001 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_transpose_operands.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
  \brief Helpers for exchanging GEMM operands and transposing their layouts so that kernels
    which produce row-major output can also serve column-major output problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
ComplexTransform TransformA,
int AlignmentA,
typename ElementB_,
typename LayoutB_,
ComplexTransform TransformB,
int AlignmentB,
typename LayoutC_,
bool Transpose
>
struct MapArguments {
using ElementA = ElementA_;
using LayoutA = LayoutA_;
static ComplexTransform const kTransformA = TransformA;
static int const kAlignmentA = AlignmentA;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
static ComplexTransform const kTransformB = TransformB;
static int const kAlignmentB = AlignmentB;
using LayoutC = LayoutC_;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA_,
typename LayoutA_,
ComplexTransform TransformA,
int AlignmentA,
typename ElementB_,
typename LayoutB_,
ComplexTransform TransformB,
int AlignmentB,
typename LayoutC_
>
struct MapArguments<
ElementA_,
LayoutA_,
TransformA,
AlignmentA,
ElementB_,
LayoutB_,
TransformB,
AlignmentB,
LayoutC_,
true
> {
using ElementA = ElementB_;
using LayoutA = typename layout::LayoutTranspose<LayoutB_>::type;
static ComplexTransform const kTransformA = TransformB;
static int const kAlignmentA = AlignmentB;
using ElementB = ElementA_;
using LayoutB = typename layout::LayoutTranspose<LayoutA_>::type;
static ComplexTransform const kTransformB = TransformA;
static int const kAlignmentB = AlignmentA;
using LayoutC = typename layout::LayoutTranspose<LayoutC_>::type;
};
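// Illustrative example (added comment): with Transpose = true,
//   MapArguments<half_t, layout::RowMajor,    ComplexTransform::kNone, 8,
//                half_t, layout::ColumnMajor, ComplexTransform::kNone, 8,
//                layout::ColumnMajor, true>
// yields ElementA = half_t with LayoutA = layout::RowMajor (the transpose of B's layout),
// ElementB = half_t with LayoutB = layout::ColumnMajor (the transpose of A's layout), and
// LayoutC = layout::RowMajor -- i.e. the operands are exchanged and all layouts transposed so that
// a column-major output problem maps onto a kernel producing row-major output.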
/////////////////////////////////////////////////////////////////////////////////////////////////
}
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4,291 | C | 33.336 | 102 | 0.598928 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Grouped Rank2K kernel.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/trace.h"
#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h"
#include "cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T)
typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T)
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
ComplexTransform OriginalTransformA_, ///! Public-facing transformation on A
ComplexTransform OriginalTransformB_, ///! Public-facing transformation on B
FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper)
BlasMode BlasMode_, ///! Blas3 computation mode
GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
bool Transposed = false
>
struct Rank2KGrouped {
public:
using Mma1 = Mma1_;
using Mma2 = Mma2_;
static_assert(platform::is_same<typename Mma1::LayoutC, cutlass::layout::RowMajor>::value &&
platform::is_same<typename Mma2::LayoutC, cutlass::layout::RowMajor>::value,
"Kernel-level grouped Rank2K requires that LayoutC be row major.");
  // Define generic Mma for use cases that use Kernel::Mma
using Mma = Mma1_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static bool const kTransposed = Transposed;
// Public-facing type definitions related to operand element type, layout, and complex conjugate
// operation. Must interact with the 'kTransposed' notion to reflect the original layout,
// fill mode, etc. passed in.
//
// Recall that a Rank2K operation performs (A x BT) + (B x AT)
// This is performed via:
// Mma1 = (A x BT)
// Mma2 = (B x AT)
//
// However, if C needs to be transposed, then this is changed to the following:
// Mma1 = (B x AT)
// Mma2 = (A x BT)
//
// The transformation above is achieved by swapping the Layouts/Elements/Transforms/etc.
// of A and B as they are passed into the instantiations of Mma1 and Mma2.
//
// Now, given access to only Mma1 and Mma2, as well as whether a transposition has occurred,
// we wish to retrieve the original Layouts/Elements/etc. for A and B that were passed into
// the device-level call.
//
// The logic to do this (which is made clearer by referencing the above instantiations) is as follows:
// LayoutA = kTransposed ? Mma2::LayoutA : Mma1::LayoutA
// LayoutB = kTransposed ? Mma1::LayoutA : Mma2::LayoutA
//
// We achieve this swapping by passing Mma1::*A and Mma2::*B to Rank2KMapArguments:
using MapArgumentsA = kernel::detail::Rank2KMapArguments<
typename Mma1::IteratorA::Element,
typename Mma1::IteratorA::Layout,
Mma1::kTransformA,
Mma1::IteratorA::AccessType::kElements,
typename Mma2::IteratorA::Element,
typename Mma2::IteratorA::Layout,
Mma2::kTransformA,
Mma2::IteratorA::AccessType::kElements,
typename Mma1::LayoutC,
FillModeC_,
kTransposed
>;
using ElementA = typename MapArgumentsA::ElementA;
using LayoutA = typename MapArgumentsA::LayoutA;
static int const kAlignmentA = MapArgumentsA::kAlignmentA;
using MapArgumentsB = kernel::detail::Rank2KMapArguments<
typename Mma2::IteratorA::Element,
typename Mma2::IteratorA::Layout,
Mma2::kTransformA,
Mma2::IteratorA::AccessType::kElements,
typename Mma1::IteratorA::Element,
typename Mma1::IteratorA::Layout,
Mma1::kTransformA,
Mma1::IteratorA::AccessType::kElements,
typename Mma2::LayoutC,
FillModeC_,
kTransposed
>;
using ElementB = typename MapArgumentsB::ElementA;
using LayoutB = typename MapArgumentsB::LayoutA;
static int const kAlignmentB = MapArgumentsB::kAlignmentA;
// Use the user-provided TransformA and TransformB, rather than those
// resulting from MapArguments, because Mma1 and Mma2 may have different
// complex transforms than those passed in by the user.
// (See kernel/rank_2k_complex.h for an example of this)
static cutlass::ComplexTransform const kTransformA = OriginalTransformA_;
static cutlass::ComplexTransform const kTransformB = OriginalTransformB_;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename MapArgumentsA::LayoutC;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static FillMode const kFillModeC = MapArgumentsA::kFillModeC;
// Common type definitions for Mma1 and Mma2
using Operator = typename Mma1::Operator;
using OperatorClass = typename Mma1::Operator::OperatorClass;
using ThreadblockShape = typename Mma1::Shape;
using WarpShape = typename Mma1::Operator::Shape;
using InstructionShape = typename Mma1::Policy::Operator::InstructionShape;
using ArchTag = typename Mma1::ArchTag;
static int const kStages = Mma1::kStages;
static BlasMode const kBlasMode = BlasMode_;
private:
static FillMode const kInternalFillModeC = FillModeC_;
public:
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma1::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ProblemVisitor = Rank2KGroupedProblemVisitor<
ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount,
kInternalFillModeC>;
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord *problem_sizes;
int problem_count;
int threadblock_count;
typename EpilogueOutputOp::Params epilogue;
ElementA ** ptr_A;
ElementB ** ptr_B;
ElementC ** ptr_C;
ElementC ** ptr_D;
typename LayoutA::Stride::LongIndex *lda;
typename LayoutB::Stride::LongIndex *ldb;
typename LayoutC::Stride::LongIndex *ldc;
typename LayoutC::Stride::LongIndex *ldd;
// Only used by device-level operator
GemmCoord *host_problem_sizes;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments():
mode(GemmUniversalMode::kGemm),
problem_count(0),
threadblock_count(0),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
lda(nullptr),
ldb(nullptr),
ldc(nullptr),
ldd(nullptr),
host_problem_sizes(nullptr)
{
}
/// Ctor
CUTLASS_HOST_DEVICE
Arguments(
GemmUniversalMode mode,
GemmCoord *problem_sizes,
int problem_count,
int threadblock_count,
typename EpilogueOutputOp::Params epilogue,
ElementA ** ptr_A,
ElementB ** ptr_B,
ElementC ** ptr_C,
ElementC ** ptr_D,
typename LayoutA::Stride::LongIndex *lda,
typename LayoutB::Stride::LongIndex *ldb,
typename LayoutC::Stride::LongIndex *ldc,
typename LayoutC::Stride::LongIndex *ldd,
GemmCoord *host_problem_sizes=nullptr
):
mode(mode),
problem_sizes(problem_sizes),
problem_count(problem_count),
threadblock_count(threadblock_count),
epilogue(epilogue),
ptr_A(ptr_A),
ptr_B(ptr_B),
ptr_C(ptr_C),
ptr_D(ptr_D),
lda(lda),
ldb(ldb),
ldc(ldc),
ldd(ldd),
host_problem_sizes(host_problem_sizes)
{
}
};
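//
// Illustrative host-side construction (comment-only sketch, not part of this
// header): the device arrays, problem count, alpha/beta, and threadblock_count
// below are hypothetical placeholders, and a linear-combination style output
// op whose Params takes (alpha, beta) is assumed.
//
//   typename Rank2KGrouped::Arguments args(
//     GemmUniversalMode::kGemm,
//     problem_sizes_device,          // GemmCoord[problem_count] in device memory
//     problem_count,
//     threadblock_count,             // typically derived from device occupancy
//     typename EpilogueOutputOp::Params(alpha, beta),
//     ptr_A_device, ptr_B_device, ptr_C_device, ptr_D_device,
//     lda_device, ldb_device, ldc_device, ldd_device,
//     problem_sizes_host);           // optional host copy used by device-level code
//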
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
typename ProblemVisitor::Params problem_visitor;
int threadblock_count;
typename EpilogueOutputOp::Params output_op;
GemmUniversalMode mode;
int batch_count;
ElementA ** ptr_A;
ElementB ** ptr_B;
ElementC ** ptr_C;
ElementC ** ptr_D;
typename LayoutA::Stride::LongIndex *lda;
typename LayoutB::Stride::LongIndex *ldb;
typename LayoutC::Stride::LongIndex *ldc;
typename LayoutC::Stride::LongIndex *ldd;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
mode(cutlass::gemm::GemmUniversalMode::kGemm),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
lda(nullptr),
ldb(nullptr),
ldc(nullptr),
ldd(nullptr)
{ }
CUTLASS_HOST_DEVICE
Params(Arguments const &args, void *workspace = nullptr, int tile_count = 0):
problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count),
threadblock_count(args.threadblock_count),
output_op(args.epilogue),
ptr_A(args.ptr_A),
ptr_B(args.ptr_B),
ptr_C(args.ptr_C),
ptr_D(args.ptr_D),
lda(args.lda),
ldb(args.ldb),
ldc(args.ldc),
ldd(args.ldd)
{
}
CUTLASS_HOST_DEVICE
void update(
Arguments const &args,
void *workspace = nullptr,
int tile_count = 0) {
problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, workspace, tile_count);
threadblock_count = args.threadblock_count;
output_op = args.epilogue;
ptr_A = args.ptr_A;
ptr_B = args.ptr_B;
ptr_C = args.ptr_C;
ptr_D = args.ptr_D;
}
};
/// Shared memory storage structure
struct SharedStorage {
union {
typename Mma1::SharedStorage mma1_main_loop;
typename Mma2::SharedStorage mma2_main_loop;
typename Epilogue::SharedStorage epilogue;
} kernel;
// ProblemVisitor shared storage can't be overlapped with others
typename ProblemVisitor::SharedStorage problem_visitor;
};
public:
//
// Methods
//
CUTLASS_DEVICE
Rank2KGrouped() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
//
// Problem visitor.
//
ProblemVisitor problem_visitor(
params.problem_visitor,
shared_storage.problem_visitor,
blockIdx.x);
// Outer 'persistent' loop to iterate over tiles
while (problem_visitor.next_tile()) {
GemmCoord problem_size = problem_visitor.problem_size();
int32_t problem_idx = problem_visitor.problem_index();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
cutlass::gemm::GemmCoord threadblock_tile_offset = problem_visitor.threadblock_offset(threadblock_idx);
//
// Perform checks to determine whether the results of this threadblock will be needed.
// An example of an unneeded threadblock is one that is assigned to compute in the upper
// portion of a Rank2K kernel with fill mode kLower.
//
// TODO: Consider pushing these checks into ProblemVisitor to avoid spuriously
// returning from `next_tile()`.
//
// Early exit if threadblock is out of range
if (grid_shape.m() <= threadblock_tile_offset.m() ||
grid_shape.n() <= threadblock_tile_offset.n()) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
// Skip this tile if Fill Mode is Lower and
// if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal)
if (kInternalFillModeC == cutlass::FillMode::kLower &&
(threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
// Skip this tile if Fill Mode is Upper and
// if the entire tile is below the main diagonal (top-right corner is at or below the diagonal)
if (kInternalFillModeC == cutlass::FillMode::kUpper &&
threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
// Next tile
problem_visitor.advance(gridDim.x);
continue;
}
bool tile_on_diagonal = false;
// Mark tiles that are being crossed by the main diagonal
// (top-right and bottom-left corners are on either side of the diagonal)
if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN
&& threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) {
tile_on_diagonal = true;
}
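// Worked example (illustrative only): with 128x128 threadblock tiles and fill
// mode kLower, the tile at (m=0, n=2) satisfies (0+1)*128 <= 2*128, i.e. its
// bottom-left corner sits at or above the diagonal, so it is skipped above;
// the tile at (m=1, n=1) satisfies both 2*128 > 1*128 and 1*128 < 2*128, so it
// straddles the diagonal and tile_on_diagonal is set here.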
int offset_k = 0;
int problem_size_k = problem_size.k();
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < grid_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * problem_size.k();
}
offset_k = threadblock_tile_offset.k() * problem_size.k();
}
ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx]));
typename LayoutA::Stride::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]);
ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx]));
typename LayoutB::Stride::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]);
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_MxK{
threadblock_tile_offset.m() * Mma1::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_KxN{
offset_k,
threadblock_tile_offset.n() * Mma1::Shape::kN
};
// Assume identity swizzle
MatrixCoord tb_offset(
threadblock_tile_offset.m() * Mma1::Shape::kM,
threadblock_tile_offset.n() * Mma1::Shape::kN
);
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands for Mma1
typename Mma1::IteratorA iterator_A(
Mma1::IteratorA::Params(ldm_A),
ptr_A,
{problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma1::IteratorB iterator_BT(
Mma1::IteratorB::Params(ldm_B),
ptr_B,
{problem_size_k, problem_size.n()},
thread_idx,
tb_offset_KxN);
// Construct iterators to A and B operands for Mma2
typename Mma2::IteratorA iterator_B(
Mma2::IteratorA::Params(ldm_B),
ptr_B,
{problem_size.m(), problem_size_k},
thread_idx,
tb_offset_MxK);
typename Mma2::IteratorB iterator_AT(
Mma2::IteratorB::Params(ldm_A),
ptr_A,
{problem_size_k, problem_size.n()},
thread_idx,
tb_offset_KxN);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply for Mma1 (A x BT)
Mma1 mma1(shared_storage.kernel.mma1_main_loop, thread_idx, warp_idx, lane_idx);
// Construct thread-scoped matrix multiply for Mma2 (B x AT)
Mma2 mma2(shared_storage.kernel.mma2_main_loop, thread_idx, warp_idx, lane_idx);
typename Mma1::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
// Wait for all threads to finish their epilogue phases from the previous tile.
__syncthreads();
// Compute threadblock-scoped matrix multiply-add (A x BT)
mma1(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_BT,
accumulators);
// The HER2K kernel needs Alpha to be complex, and conj(Alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C[problem_idx]);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D[problem_idx]);
// If TB not on diagonal, FillMode doesn't apply.
FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]),
ptr_C,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]),
ptr_D,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
__syncthreads();
accumulators.clear();
}
// Compute threadblock-scoped matrix multiply-add (B x AT)
mma2(
gemm_k_iterations,
accumulators,
iterator_B,
iterator_AT,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
/* Needed for HER2K where the second HERK is multiplied by conj(alpha) */
typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1);
EpilogueOutputOp output_op_her2k(second_her2k_params);
//
// Masked tile iterators constructed from members
//
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C[problem_idx]);
// The HER2K kernel needs Alpha to be complex, and conj(Alpha) is applied to the second HERK.
if (kBlasMode == BlasMode::kHermitian) {
ptr_C = static_cast<ElementC *>(params.ptr_D[problem_idx]);
}
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D[problem_idx]);
// If TB not on diagonal, FillMode doesn't apply.
FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone;
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]),
ptr_C,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]),
ptr_D,
problem_size.mn(),
thread_idx,
tb_offset,
kFillModeTB
);
Epilogue epilogue(
shared_storage.kernel.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
if (kBlasMode == BlasMode::kSymmetric) {
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
} else {
epilogue(
output_op_her2k,
iterator_D,
accumulators,
iterator_C);
}
// Next tile
problem_visitor.advance(gridDim.x);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 22,962 | C | 31.571631 | 124 | 0.631086 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/gemm_with_fused_epilogue.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Gemm kernel with fused reduction operation.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/layout.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool IsSingleSource = Epilogue_::kIsSingleSource
>
struct GemmWithFusedEpilogue;
// GemmWithFusedEpilogue with two sources
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, false> {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value
);
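// Worked example (not normative): if ElementA and ElementB are both 16-bit
// types such as cutlass::half_t, kSplitKAlignment evaluates to
// const_max(128/16, 128/16) = 8 elements, i.e. split-K slices along K are kept
// multiples of 8 elements (128 bits).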
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase {
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C1;
void const * ptr_C2;
void * ptr_D;
void * ptr_Vector;
void * ptr_Tensor;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C1;
int64_t batch_stride_C2;
int64_t batch_stride_Vector;
int64_t batch_stride_Tensor;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc1;
typename LayoutC::Stride::Index ldc2;
typename LayoutC::Stride::Index ldd;
typename LayoutC::Stride::Index ldr;
typename LayoutC::Stride::Index ldt;
//
// Methods
//
Arguments():
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C1(nullptr),
ptr_C2(nullptr),
ptr_D(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C1,
void const * ptr_C2,
void * ptr_D,
void * ptr_Vector,
void * ptr_Tensor,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C1,
int64_t batch_stride_C2,
int64_t batch_stride_D,
int64_t batch_stride_Vector,
int64_t batch_stride_Tensor,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc1,
typename LayoutC::Stride::Index ldc2,
typename LayoutC::Stride::Index ldd,
typename LayoutC::Stride::Index ldr,
typename LayoutC::Stride::Index ldt)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C1(ptr_C1), ptr_C2(ptr_C2), ptr_D(ptr_D),
ptr_Vector(ptr_Vector),
ptr_Tensor(ptr_Tensor),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C1(batch_stride_C1),
batch_stride_C2(batch_stride_C2),
batch_stride_Vector(batch_stride_Vector),
batch_stride_Tensor(batch_stride_Tensor),
lda(lda), ldb(ldb), ldc1(ldc1), ldc2(ldc2), ldd(ldd), ldr(ldr), ldt(ldt)
{
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size);
CUTLASS_TRACE_HOST("  ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << this->ldt);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C1;
typename Epilogue::OutputTileIterator::Params params_C2;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::TensorTileIterator::Params params_Tensor;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C1;
void * ptr_C2;
void * ptr_D;
void * ptr_Vector;
typename LayoutC::Stride::Index ldr;
void * ptr_Tensor;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C1;
int64_t batch_stride_C2;
int64_t batch_stride_Vector;
int64_t batch_stride_Tensor;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda),
params_B(args.ldb),
params_C1(args.ldc1),
params_C2(args.ldc2),
params_D(args.ldd),
params_Tensor(args.ldt),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C1(const_cast<void *>(args.ptr_C1)),
ptr_C2(const_cast<void *>(args.ptr_C2)),
ptr_D(args.ptr_D),
ptr_Vector(args.ptr_Vector),
ldr(args.ldr),
ptr_Tensor(args.ptr_Tensor),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C1(args.batch_stride_C1),
batch_stride_C2(args.batch_stride_C2),
batch_stride_Vector(args.batch_stride_Vector),
batch_stride_Tensor(args.batch_stride_Tensor)
{
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::Params() - problem_size: " << problem_size);
CUTLASS_TRACE_HOST("  ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << args.ldt);
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
CUTLASS_HOST_DEVICE
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C1 = const_cast<void *>(args.ptr_C1);
ptr_C2 = const_cast<void *>(args.ptr_C2);
ptr_D = args.ptr_D;
ptr_Vector = args.ptr_Vector;
ldr = args.ldr;
ptr_Tensor = args.ptr_Tensor;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::update()");
CUTLASS_TRACE_HOST("  ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::can_implement()");
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithFusedEpilogue op;
op(params, shared_storage);
}
#define SPLIT_K_ENABLED 1
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
#if SPLIT_K_ENABLED
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
#endif
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
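// Illustrative arithmetic only: with problem_size_k = 1000, offset_k = 0 and
// Mma::Shape::kK = 32, this yields (1000 + 31) / 32 = 32 mainloop iterations;
// out-of-range K elements in the last iteration are masked by the predicated
// operand iterators.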
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C1 = static_cast<ElementC *>(params.ptr_C1);
ElementC *ptr_C2 = static_cast<ElementC *>(params.ptr_C2);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
//
// Fetch pointers based on mode.
//
//
// Special path when split-K not enabled.
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() == 1) {
// Tile iterators loading from source tensors.
typename Epilogue::OutputTileIterator iterator_C1(
params.params_C1,
ptr_C1,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C2(
params.params_C2,
ptr_C2,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
ptr_Tensor,
params.problem_size.mn(),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
ptr_Vector,
iterator_D,
accumulators,
iterator_C1,
iterator_C2,
tensor_iterator,
params.problem_size.mn(),
threadblock_offset);
return;
}
//
// Slower path when split-K or batching is needed
//
#if SPLIT_K_ENABLED
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C1 += threadblock_tile_offset.k() * params.batch_stride_C1;
if (ptr_C2) {
ptr_C2 += threadblock_tile_offset.k() * params.batch_stride_C2;
}
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
if (ptr_Tensor) {
ptr_Tensor += threadblock_tile_offset.k() * params.batch_stride_Tensor;
}
if (ptr_Vector) {
ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector;
}
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C1 = static_cast<ElementC * const *>(params.ptr_C1)[threadblock_tile_offset.k()];
if (ptr_C2) {
ptr_C2 = static_cast<ElementC * const *>(params.ptr_C2)[threadblock_tile_offset.k()];
}
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
if (ptr_Tensor) {
ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[threadblock_tile_offset.k()];
}
if (ptr_Vector) {
ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[threadblock_tile_offset.k()];
}
}
#endif
// Tile iterators loading from source tensors.
typename Epilogue::OutputTileIterator iterator_C1(
params.params_C1,
ptr_C1,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
typename Epilogue::OutputTileIterator iterator_C2(
params.params_C2,
ptr_C2,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Tensor,
params.problem_size.mn(),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
#if SPLIT_K_ENABLED
// Wait on the semaphore - this latency may have been covered by iterator construction
if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C1 = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
#endif
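// Illustrative serial split-K sequencing (assuming a typical linear-combination
// output op): with grid_tiled_shape.k() == 3, the k=0 block waits on lock value
// 0 (immediately available), applies beta to the original sources and writes
// its partial result to D, then releases 1; the k=1 block waits on 1 and
// accumulates onto D (iterator_C1 was redirected to D above), then releases 2;
// the final k=2 block waits on 2, accumulates, emits the auxiliary Tensor and
// Vector outputs (which only the last partition writes), and resets the lock
// to 0 for subsequent grids.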
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
// Only the final block uses Vector
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Vector,
iterator_D,
accumulators,
iterator_C1,
iterator_C2,
tensor_iterator,
params.problem_size.mn(),
threadblock_offset);
//
// Release the semaphore
//
#if SPLIT_K_ENABLED
if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
#endif
}
};
// GemmWithFusedEpilogue with one source
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, true> {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value
);
//
// Structures
//
/// Argument structure
struct Arguments : UniversalArgumentsBase
{
//
// Data members
//
typename EpilogueOutputOp::Params epilogue;
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
void * ptr_Vector;
void * ptr_Tensor;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_Vector;
int64_t batch_stride_Tensor;
typename LayoutA::Stride::Index lda;
typename LayoutB::Stride::Index ldb;
typename LayoutC::Stride::Index ldc;
typename LayoutC::Stride::Index ldd;
typename LayoutC::Stride::Index ldr;
typename LayoutC::Stride::Index ldt;
//
// Methods
//
Arguments():
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr)
{}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void * ptr_Vector,
void * ptr_Tensor,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_Vector,
int64_t batch_stride_Tensor,
typename LayoutA::Stride::Index lda,
typename LayoutB::Stride::Index ldb,
typename LayoutC::Stride::Index ldc,
typename LayoutC::Stride::Index ldd,
typename LayoutC::Stride::Index ldr,
typename LayoutC::Stride::Index ldt)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
epilogue(epilogue),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
ptr_Vector(ptr_Vector),
ptr_Tensor(ptr_Tensor),
batch_stride_A(batch_stride_A),
batch_stride_B(batch_stride_B),
batch_stride_C(batch_stride_C),
batch_stride_Vector(batch_stride_Vector),
batch_stride_Tensor(batch_stride_Tensor),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldr(ldr), ldt(ldt)
{
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size);
CUTLASS_TRACE_HOST("  ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << this->ldt);
}
/// Returns arguments for the transposed problem
Arguments transposed_problem() const {
Arguments args(*this);
std::swap(args.problem_size.m(), args.problem_size.n());
std::swap(args.ptr_A, args.ptr_B);
std::swap(args.lda, args.ldb);
std::swap(args.batch_stride_A, args.batch_stride_B);
return args;
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::TensorTileIterator::Params params_Tensor;
typename EpilogueOutputOp::Params output_op;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
void * ptr_Vector;
typename LayoutC::Stride::Index ldr;
void * ptr_Tensor;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_Vector;
int64_t batch_stride_Tensor;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda),
params_B(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
params_Tensor(args.ldt),
output_op(args.epilogue),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
ptr_Vector(args.ptr_Vector),
ldr(args.ldr),
ptr_Tensor(args.ptr_Tensor),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_Vector(args.batch_stride_Vector),
batch_stride_Tensor(args.batch_stride_Tensor)
{
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::Params() - problem_size: " << problem_size);
CUTLASS_TRACE_HOST("  ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
CUTLASS_TRACE_HOST(" ldt: " << args.ldt);
}
/// Lightweight update given a subset of arguments. Problem geometry is assumed
/// to remain the same.
CUTLASS_HOST_DEVICE
void update(Arguments const &args)
{
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
ptr_C = const_cast<void *>(args.ptr_C);
ptr_D = args.ptr_D;
ptr_Vector = args.ptr_Vector;
ldr = args.ldr;
ptr_Tensor = args.ptr_Tensor;
output_op = args.epilogue;
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::Params::update()");
CUTLASS_TRACE_HOST("  ptr_Vector: " << (void *)this->ptr_Vector);
CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmWithFusedEpilogue::can_implement()");
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithFusedEpilogue op;
op(params, shared_storage);
}
#define SPLIT_K_ENABLED 1
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
#if SPLIT_K_ENABLED
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
#endif
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
//
// Fetch pointers based on mode.
//
//
// Special path when split-K not enabled.
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() == 1) {
// Tile iterators loading from source tensors.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
ptr_Tensor,
params.problem_size.mn(),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
ptr_Vector,
iterator_D,
accumulators,
iterator_C,
tensor_iterator,
params.problem_size.mn(),
threadblock_offset);
return;
}
//
// Slower path when split-K or batching is needed
//
#if SPLIT_K_ENABLED
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
if (ptr_Tensor) {
ptr_Tensor += threadblock_tile_offset.k() * params.batch_stride_Tensor;
}
if (ptr_Vector) {
ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector;
}
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
if (ptr_Tensor) {
ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[threadblock_tile_offset.k()];
}
if (ptr_Vector) {
ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[threadblock_tile_offset.k()];
}
}
#endif
// Tile iterators loading from source tensors.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Tensor,
params.problem_size.mn(),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
#if SPLIT_K_ENABLED
// Wait on the semaphore - this latency may have been covered by iterator construction
if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
#endif
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr;
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
// Only the final block uses Vector
((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1))
? nullptr
: ptr_Vector,
iterator_D,
accumulators,
iterator_C,
tensor_iterator,
params.problem_size.mn(),
threadblock_offset);
//
// Release the semaphore
//
#if SPLIT_K_ENABLED
if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 47,222 | C | 30.735887 | 125 | 0.633434 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_universal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
#include "cutlass/gemm/kernel/gemm_universal_streamk.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/layout/permute.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
/// Gather operand A by using an index array
bool GatherA = false,
/// Gather operand B by using an index array
bool GatherB = false,
/// Scatter result D by using an index array
bool ScatterD = false,
/// Permute result D
typename PermuteDLayout = layout::NoPermute,
///
typename Enable = void
>
struct DefaultGemmUniversal;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued GEMM kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
/// Gather operand A by using an index array
bool GatherA,
/// Gather operand B by using an index array
bool GatherB,
/// Scatter result D by using an index array
bool ScatterD,
/// Permute result D
typename PermuteDLayout
>
struct DefaultGemmUniversal<
ElementA,
LayoutA,
ComplexTransform::kNone, // transform A
kAlignmentA,
ElementB,
LayoutB,
ComplexTransform::kNone, // transform B
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
SharedMemoryClear,
GatherA,
GatherB,
ScatterD,
PermuteDLayout,
typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultGemmKernel = typename kernel::DefaultGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
true,
Operator,
SharedMemoryClear,
GatherA,
GatherB,
ScatterD,
PermuteDLayout
>::GemmKernel;
/// Universal kernel without StreamkFeature member type
template <class SwizzleT, class Enable = void>
class SelectBase :
public kernel::GemmUniversal<
typename DefaultGemmKernel::Mma,
typename DefaultGemmKernel::Epilogue,
SwizzleT>
{};
/// Universal kernel with StreamkFeature member type
template <class SwizzleT>
class SelectBase<SwizzleT, typename SwizzleT::StreamkFeature> :
public kernel::GemmUniversalStreamk<
typename DefaultGemmKernel::Mma,
typename DefaultGemmKernel::Epilogue,
SwizzleT>
{};
/// Select kernel by ThreadblockSwizzle's support for StreamkFeature
using GemmKernel = SelectBase<ThreadblockSwizzle>;
};
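// Illustrative usage sketch: a plausible instantiation of DefaultGemmUniversal for an
// SM80 FP16 tensor-op GEMM with FP32 accumulation. The tile shapes, alignments, and
// stage count below are assumptions chosen for illustration only; production
// configurations typically come from the CUTLASS library generator or profiler.
//
//   using Kernel = typename cutlass::gemm::kernel::DefaultGemmUniversal<
//     cutlass::half_t, cutlass::layout::RowMajor,    cutlass::ComplexTransform::kNone, 8,
//     cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 8,
//     cutlass::half_t, cutlass::layout::RowMajor,
//     float,
//     cutlass::arch::OpClassTensorOp,
//     cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 32>,   // threadblock tile
//     cutlass::gemm::GemmShape<64, 64, 32>,     // warp tile
//     cutlass::gemm::GemmShape<16, 8, 16>,      // instruction shape
//     cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     3,                                        // stages
//     cutlass::arch::OpMultiplyAdd
//   >::GemmKernel;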
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued GEMM kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear
>
struct DefaultGemmUniversal<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
SharedMemoryClear,
false,
false,
false,
layout::NoPermute,
typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultGemmKernel = typename kernel::DefaultGemmComplex<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
TransformA,
TransformB,
Operator,
false
>::GemmKernel;
/// Universal kernel without StreamkFeature member type
template <class SwizzleT, class Enable = void>
class SelectBase :
public kernel::GemmUniversal<
typename DefaultGemmKernel::Mma,
typename DefaultGemmKernel::Epilogue,
SwizzleT>
{};
/// Universal kernel with StreamkFeature member type
template <class SwizzleT>
class SelectBase<SwizzleT, typename SwizzleT::StreamkFeature> :
public kernel::GemmUniversalStreamk<
typename DefaultGemmKernel::Mma,
typename DefaultGemmKernel::Epilogue,
SwizzleT>
{};
/// Select kernel by ThreadblockSwizzle's support for StreamkFeature
using GemmKernel = SelectBase<ThreadblockSwizzle>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,951 | C | 30.206266 | 100 | 0.672161 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_with_broadcast.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Defines a GEMM with Reduction based on an existing UniversalGemm kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/gemm_with_fused_epilogue.h"
#include "cutlass/gemm/kernel/default_gemm_universal.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
///
typename Enable = void
>
struct DefaultGemmWithBroadcast {
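  // How this trait composes the kernel: the universal GEMM below supplies the mainloop
  // (Mma) and a baseline epilogue; that epilogue is then replaced with an
  // epilogue-with-broadcast variant parameterized on EpilogueOutputOp::ElementT, and the
  // pieces are recombined into GemmWithFusedEpilogue.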
using GemmBase = typename DefaultGemmUniversal<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_, ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator
>::GemmKernel;
// Replace epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp<
typename GemmBase::Epilogue::Shape,
typename GemmBase::Epilogue::WarpMmaOperator,
GemmBase::Epilogue::kPartitionsK,
ElementC_,
typename EpilogueOutputOp::ElementT,
ElementC_,
EpilogueOutputOp,
GemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Compose the GEMM kernel
using GemmKernel = GemmWithFusedEpilogue<
typename GemmBase::Mma,
Epilogue,
ThreadblockSwizzle
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization: ArchTag = cutlass::arch::Sm70
///
///
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
///
typename Enable
>
struct DefaultGemmWithBroadcast<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_,
ElementAccumulator,
OperatorClass,
cutlass::arch::Sm70,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
Enable
> {
using GemmBase = typename DefaultGemmUniversal<
ElementA_, LayoutA_, TransformA, kAlignmentA,
ElementB_, LayoutB_, TransformB, kAlignmentB,
ElementC_, LayoutC_, ElementAccumulator,
OperatorClass,
cutlass::arch::Sm70,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator
>::GemmKernel;
// Replace epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithBroadcastVoltaTensorOp<
typename GemmBase::Epilogue::Shape,
typename GemmBase::Epilogue::WarpMmaOperator,
GemmBase::Epilogue::kPartitionsK,
ElementC_,
typename EpilogueOutputOp::ElementT,
ElementC_,
EpilogueOutputOp,
GemmBase::Epilogue::kElementsPerAccess
>::Epilogue;
// Compose the GEMM kernel
using GemmKernel = GemmWithFusedEpilogue<
typename GemmBase::Mma,
Epilogue,
ThreadblockSwizzle
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,063 | C | 32.04918 | 102 | 0.679648 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/kernel/default_gemm_with_k_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_with_k_reduction.h"
#include "cutlass/gemm/threadblock/default_mma_with_reduction.h"
#include "cutlass/gemm/threadblock/default_mma_core_with_reduction.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Reduce A or B along the K dimension
bool ReduceKForA_,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
///
typename Enable = void>
struct DefaultGemmWithKReduction {
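  // Note: as described in the file brief above, column-major C/D layouts are realized by
  // exchanging the A and B operands; the conditional flip below keeps the requested
  // "reduce K for A" semantics consistent after that exchange.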
static const bool kReduceKForA = (platform::is_same<LayoutC, cutlass::layout::RowMajor>::value) ? ReduceKForA_ : !ReduceKForA_;
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultMmaWithReduction<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, kReduceKForA, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator, false, SharedMemoryClear>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the epilogue of the reduction vector
using EpilogueGemmKReduction =
typename cutlass::epilogue::threadblock::EpilogueGemmKReduction<
ElementAccumulator, ElementC, ThreadblockShape, typename Mma::Operator, kReduceKForA>;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::GemmWithKReduction<Mma, Epilogue, EpilogueGemmKReduction, ThreadblockSwizzle>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,457 | C | 41.768212 | 129 | 0.695834 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/conv2d_problem_size.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains definitions and utility functions for describing convolution problem sizes.
    Conv2dProblem description:
activation (NHWC),
filter (KRSC),
output (NPQK),
      padding (pad_h, pad_w),
stride (stride_h, stride_w),
dilation (dilation_h, dilation_w).
Free functions to map:
Map tensor extents (Conv2d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_extent(ConvolutionOperator)
Map tensor sizes (Conv2d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_size(ConvolutionOperator)
Map tensor problem sizes (Conv2d -> ImplicitGemm): implicit_gemm_problem_size(ConvolutionOperator)
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cmath>
#else
#include <cmath>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/functional.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Problem size structure
struct Conv2dProblemSize {
  // Parameters that strictly define the Conv2d problem size
int N, H, W, C, P, Q, K, R, S;
int pad_h, pad_w;
int stride_h, stride_w;
int dilation_h, dilation_w;
Mode mode;
// Conv2d implementation-related parameters
int split_k_slices;
int groups;
//
// Methods
//
public:
CUTLASS_HOST_DEVICE
Conv2dProblemSize():
N(0), H(0), W(0), C(0), P(0), Q(0), K(0), R(0), S(0),
pad_h(0), pad_w(0), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1),
mode(Mode::kConvolution), split_k_slices(1), groups(1) { }
/// Constructor for default padding, stride, dilation, and split-K
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
int N,
int H,
int W,
int C,
int P,
int Q,
int K,
int R,
int S,
Mode mode
):
N(N), H(H), W(W), C(C), P(P), Q(Q), K(K), R(R), S(S),
pad_h(R / 2), pad_w(S / 2), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1),
mode(mode), split_k_slices(1), groups (1) { }
/// Constructor
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
int N,
int H,
int W,
int C,
int K,
int R,
int S,
int P,
int Q,
int pad_h,
int pad_w,
int stride_h,
int stride_w,
int dilation_h,
int dilation_w,
Mode mode,
int split_k_slices = 1,
int groups = 1
):
N(N), H(H), W(W), C(C), K(K), R(R), S(S), P(P), Q(Q),
pad_h(pad_h), pad_w(pad_w), stride_h(stride_h), stride_w(stride_w),
dilation_h(dilation_h), dilation_w(dilation_w),
mode(mode), split_k_slices(split_k_slices), groups (groups) { }
/// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord
  // sets P and Q from the user-defined output size (all data members included in ctor)
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
cutlass::Tensor4DCoord input_size, // NHWC
cutlass::Tensor4DCoord filter_size, // KRSC
cutlass::Tensor4DCoord padding, // pad_h, _, pad_w, _
cutlass::MatrixCoord stride, // stride_h, stride_w
cutlass::MatrixCoord dilation, // dilation_h, dilation_w
cutlass::Tensor4DCoord output_size, // NPQK
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()),
K(filter_size.n()), R(filter_size.h()), S(filter_size.w()),
pad_h(padding[0]), pad_w(padding[2]),
stride_h(stride.row()), stride_w(stride.column()),
dilation_h(dilation.row()), dilation_w(dilation.column()),
P(output_size.h()), Q(output_size.w()),
mode(mode), split_k_slices(split_k_slices), groups(groups) {}
/// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord
// computes output size and sets P and Q (skip output from ctor arguments)
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
cutlass::Tensor4DCoord input_size, // NHWC
cutlass::Tensor4DCoord filter_size, // KRSC
cutlass::Tensor4DCoord padding, // pad_h, _, pad_w, _
cutlass::MatrixCoord stride, // stride_h, stride_w
cutlass::MatrixCoord dilation, // dilation_h, dilation_w
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()),
K(filter_size.n()), R(filter_size.h()), S(filter_size.w()),
pad_h(padding[0]), pad_w(padding[2]),
stride_h(stride.row()), stride_w(stride.column()),
dilation_h(dilation.row()), dilation_w(dilation.column()),
mode(mode), split_k_slices(split_k_slices), groups(groups) {
// set output P and Q
P = ((H + pad_h * 2 - R * dilation_h) / stride_h) + 1;
Q = ((W + pad_w * 2 - S * dilation_w) / stride_w) + 1;
}
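  // Worked example (illustrative, unit dilation): H = W = 56, R = S = 3, pad_h = pad_w = 1,
  // stride_h = stride_w = 1 gives P = ((56 + 2*1 - 3*1) / 1) + 1 = 56 and Q = 56, i.e. a
  // same-sized output for a 3x3 filter with unit stride.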
/// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord
  // sets P and Q from the user-defined output size (padding, stride, and dilation take default values)
CUTLASS_HOST_DEVICE
Conv2dProblemSize(
cutlass::Tensor4DCoord input_size, // NHWC
cutlass::Tensor4DCoord filter_size, // KRSC
cutlass::Tensor4DCoord output_size, // NPQK
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()),
K(filter_size.n()), R(filter_size.h()), S(filter_size.w()),
P(output_size.h()), Q(output_size.w()),
pad_h(R / 2), pad_w(S / 2), stride_h(1), stride_w(1),
dilation_h(1), dilation_w(1),
mode(mode), split_k_slices(split_k_slices), groups(groups) {}
  // Reset convolution mode in the problem
CUTLASS_HOST_DEVICE
Conv2dProblemSize reset_mode(cutlass::conv::Mode mode_) {
Conv2dProblemSize tmp(*this);
tmp.mode = mode_;
return tmp;
}
  // Reset split-K slices in the problem
CUTLASS_HOST_DEVICE
Conv2dProblemSize reset_split_k_slices(int split_k_slices_) {
Conv2dProblemSize tmp(*this);
tmp.split_k_slices = split_k_slices_;
return tmp;
}
/// Equality operator (ignores mode and split_k_slice)
CUTLASS_HOST_DEVICE
bool operator==(Conv2dProblemSize const &conv) const {
return (
(N == conv.N) && (H == conv.H) && (W == conv.W) && (C == conv.C) &&
(K == conv.K) && (R == conv.R) && (S == conv.S) &&
(P == conv.P) && (Q == conv.Q) &&
(pad_h == conv.pad_h) && (pad_w == conv.pad_w) &&
(stride_h == conv.stride_h) && (stride_w == conv.stride_w) &&
(dilation_h == conv.dilation_h) && (dilation_w == conv.dilation_w)
);
}
/// Inequality operator
CUTLASS_HOST_DEVICE
bool operator!=(Conv2dProblemSize const &rhs) const {
return !(*this == rhs);
}
/// Returns activation extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord activation_extent() const {
return cutlass::Tensor4DCoord ({N, H, W, C});
}
/// Returns filter extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord filter_extent() const {
return cutlass::Tensor4DCoord ({K, R, S, C / groups});
}
/// Returns output extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord output_extent() const {
return cutlass::Tensor4DCoord ({N, P, Q, K});
}
/// Returns activation size in number of elements
CUTLASS_HOST_DEVICE
int64_t activation_size() const {
return (N * H * W * C);
}
/// Returns filter size in number of elements
CUTLASS_HOST_DEVICE
int64_t filter_size() const {
return (K * R * S * C / groups);
}
/// Returns output size in number of elements
CUTLASS_HOST_DEVICE
int64_t output_size() const {
return (N * P * Q * K);
}
/// Returns padding as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord padding() const {
return cutlass::Tensor4DCoord ({pad_h, pad_h, pad_w, pad_w});
}
/// Returns stride as MatrixCoord
CUTLASS_HOST_DEVICE
cutlass::MatrixCoord stride() const {
return cutlass::MatrixCoord ({stride_h, stride_w});
}
/// Returns dilation as MatrixCoord
CUTLASS_HOST_DEVICE
cutlass::MatrixCoord dilation() const {
return cutlass::MatrixCoord ({dilation_h, dilation_w});
}
/////////////////////////////////////////////////////////////////
// Methods used for strided dgrad implementation
/////////////////////////////////////////////////////////////////
/// Number of filter r positions to accumulate in gemm-k dim
CUTLASS_HOST_DEVICE
int num_gemm_k_filter_r(int r) const {
return ((R - r + stride_h - 1) / stride_h);
}
/// Number of filter s positions to accumulate in gemm-k dim
CUTLASS_HOST_DEVICE
int num_gemm_k_filter_s(int s) const {
return ((S - s + stride_w - 1) / stride_w);
}
/// Number of filter positions to accumulate in gemm-k dim
CUTLASS_HOST_DEVICE
int num_gemm_k_filter_positions(int r, int s) const {
return num_gemm_k_filter_r(r) * num_gemm_k_filter_s(s);
}
};
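// Illustrative usage sketch (values are arbitrary): constructing a problem size from
// coordinate types and letting the constructor above derive P and Q.
//
//   cutlass::conv::Conv2dProblemSize problem(
//     {1, 56, 56, 64},    // input NHWC
//     {128, 3, 3, 64},    // filter KRSC
//     {1, 1, 1, 1},       // padding (pad_h, _, pad_w, _)
//     {1, 1},             // stride (stride_h, stride_w)
//     {1, 1});            // dilation (dilation_h, dilation_w)
//
//   // problem.output_extent() == {1, 56, 56, 128} (NPQK)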
////////////////////////////////////////////////////////////////////////////////////////////////////
// ImplicitGemm helper functions //
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Determine the problem size of the implicit GEMM operation
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord implicit_gemm_problem_size(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
// Compute problem size
switch (conv_operator) {
case Operator::kFprop:
return gemm::GemmCoord(
problem_size.N * problem_size.P * problem_size.Q,
problem_size.K,
problem_size.R * problem_size.S * problem_size.C / problem_size.groups
);
case Operator::kDgrad:
return gemm::GemmCoord(
problem_size.N * problem_size.H * problem_size.W,
problem_size.C,
problem_size.R * problem_size.S * problem_size.K
);
case Operator::kWgrad:
return gemm::GemmCoord(
problem_size.K,
problem_size.R * problem_size.S * problem_size.C,
problem_size.N * problem_size.P * problem_size.Q
);
default:
break;
}
return gemm::GemmCoord();
}
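// Example (illustrative): for a kFprop problem with N = 1, P = Q = 56, K = 128,
// R = S = 3, C = 64, groups = 1, the implicit GEMM shape is
//   GEMM_M = N*P*Q = 3136,  GEMM_N = K = 128,  GEMM_K = R*S*C/groups = 576.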
// Determine the number of gemm_k iterations for conv2d problem using implicit gemm algorithm
CUTLASS_HOST_DEVICE
int implicit_gemm_k_iterations(
Operator conv_operator,
int threadblock_K,
Conv2dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic,
GroupMode group_mode = GroupMode::kNone,
int threadblock_N = 0) {
int iterations = 0;
if (group_mode == GroupMode::kNone) {
if (algorithm == IteratorAlgorithm::kFixedChannels) {
int positions_per_iteration = threadblock_K / problem_size.C;
switch (conv_operator) {
case Operator::kFprop:
iterations = (problem_size.R * problem_size.S + positions_per_iteration - 1 ) / positions_per_iteration;
break;
default:
break;
}
}
else if (algorithm == IteratorAlgorithm::kFewChannels) {
switch (conv_operator) {
case Operator::kFprop:
iterations = (problem_size.R * problem_size.S * problem_size.C + threadblock_K - 1 ) / threadblock_K;
break;
default:
break;
}
}
else {
int elements_per_split_k_slice = 0;
switch (conv_operator) {
case Operator::kFprop:
elements_per_split_k_slice = (problem_size.C + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kDgrad:
elements_per_split_k_slice = (problem_size.K + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kWgrad:
elements_per_split_k_slice = (problem_size.N * problem_size.P * problem_size.Q + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = (elements_per_split_k_slice + threadblock_K - 1) / threadblock_K;
break;
default:
break;
}
}
} else if (group_mode == GroupMode::kDepthwise) {
int channels_per_cta = threadblock_N;
if (algorithm == IteratorAlgorithm::kAnalytic) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.R * problem_size.S *
((channels_per_cta + threadblock_K - 1) / threadblock_K);
break;
default:
break;
}
}
} else { // Group conv
int channels_per_group = problem_size.C / problem_size.groups;
int k_per_group = problem_size.K / problem_size.groups;
if (algorithm == IteratorAlgorithm::kAnalytic) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.R * problem_size.S * ((channels_per_group + threadblock_K - 1) / threadblock_K);
// In group conv, if k_per_group < threadblock_N, one Threadblock will calculate multiple groups
if (problem_size.groups != 1) {
if (k_per_group < threadblock_N) {
iterations *= threadblock_N / k_per_group;
}
}
break;
default:
break;
}
} else if (algorithm == IteratorAlgorithm::kOptimized) {
// Current optimized iterator only support GroupMode::kSingleGroup
if (group_mode == GroupMode::kSingleGroup) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.R * problem_size.S * ((channels_per_group + threadblock_K - 1) / threadblock_K);
break;
default:
break;
}
}
}
}
return iterations;
}
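// Example (illustrative): kFprop on the default (ungrouped, non-fixed/few-channel) path
// with C = 64, R = S = 3, split_k_slices = 1 and threadblock_K = 32 gives
// elements_per_split_k_slice = 64 and iterations = 3 * 3 * ceil(64 / 32) = 18.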
template <int N = 1, int Output_P = 1, int Output_Q = 1>
CUTLASS_HOST_DEVICE
int depthwise_gemm_k_iterations(
Operator conv_operator,
int threadblock_K,
Conv2dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic,
GroupMode group_mode = GroupMode::kNone,
int threadblock_N = 0) {
int n = problem_size.N;
int p = (problem_size.P + Output_P - 1) / Output_P;
int q = (problem_size.Q + Output_Q - 1) / Output_Q;
int iterations = (n * p * q + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
return iterations;
}
CUTLASS_HOST_DEVICE
int implicit_gemm_k_iterations_per_channel(
Operator conv_operator,
int threadblock_K,
Conv2dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic) {
int iterations = 0; //0 means not applicable
if (algorithm == IteratorAlgorithm::kAnalytic || algorithm == IteratorAlgorithm::kOptimized) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.R * problem_size.S;
break;
case Operator::kDgrad:
iterations = problem_size.R * problem_size.S;
break;
default:
break;
}
}
return iterations;
}
////////////////////////////////////////////////////////////////////////////////
// Mapping function (ImplicitGemm A, B, C -> Conv Activation, Filter, Output)
////////////////////////////////////////////////////////////////////////////////
/// Returns ImplicitGemm tensor A extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord implicit_gemm_tensor_a_extent(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.output_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.output_extent();
default : break;
}
return cutlass::Tensor4DCoord();
}
/// Returns ImplicitGemm tensor B extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord implicit_gemm_tensor_b_extent(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.filter_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_extent();
default : break;
}
return cutlass::Tensor4DCoord();
}
/// Returns ImplicitGemm tensor C extent as Tensor4DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor4DCoord implicit_gemm_tensor_c_extent(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.activation_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_extent();
default : break;
}
return cutlass::Tensor4DCoord();
}
/// Returns ImplicitGemm tensor A size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_a_size(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_size();
case cutlass::conv::Operator::kDgrad: return problem_size.output_size();
case cutlass::conv::Operator::kWgrad: return problem_size.output_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor B size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_b_size(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_size();
case cutlass::conv::Operator::kDgrad: return problem_size.filter_size();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor C size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_c_size(
Operator conv_operator,
Conv2dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_size();
case cutlass::conv::Operator::kDgrad: return problem_size.activation_size();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_size();
default : break;
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// Strided dgrad helper functions //
////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the number of CTA tiles in M needed to cover valid MMAs per starting filter position
CUTLASS_HOST_DEVICE
int strided_dgrad_tile_m_per_filter(
Conv2dProblemSize const &problem_size,
int tile_size_m) {
  // Compute NHW rows in Dx output that need MMA per starting filter position
int rows_h_per_filter = (problem_size.H + problem_size.stride_h - 1) / problem_size.stride_h;
int rows_w_per_filter = (problem_size.W + problem_size.stride_w - 1) / problem_size.stride_w;
int rows_nhw_per_filter = problem_size.N * rows_h_per_filter * rows_w_per_filter;
  // Number of CTA tiles in M needed to cover valid MMAs per starting filter position
int tile_m_per_filter = (rows_nhw_per_filter + tile_size_m - 1) / tile_size_m;
return tile_m_per_filter;
}
// Computes starting Dx coord (h, w) for given starting filter position
CUTLASS_HOST_DEVICE
void strided_dgrad_starting_coords(
Conv2dProblemSize const &problem_size,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int r, int s,
int &start_h, int &start_w) {
// function locals for remainder by fast divmod
int pad_h_rem_, pad_w_rem_;
// start_h = std::abs(problem_size.stride_h - ((problem_size.pad_h % problem_size.stride_h) - r)) % problem_size.stride_h;
stride_h_divmod.divmod(pad_h_rem_, problem_size.pad_h);
int r_ = absolute_value(problem_size.stride_h - (pad_h_rem_ - r));
stride_h_divmod.divmod(start_h, r_);
//start_w = std::abs(problem_size.stride_w - ((problem_size.pad_w % problem_size.stride_w) - s)) % problem_size.stride_w;
stride_w_divmod.divmod(pad_w_rem_, problem_size.pad_w);
int s_ = absolute_value(problem_size.stride_w - (pad_w_rem_ - s));
stride_w_divmod.divmod(start_w, s_);
}
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 22,725 | C | 33.80245 | 152 | 0.623982 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/conv3d_problem_size.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains definitions and utility functions for describing convolution problem sizes.
    Conv3dProblem description:
activation (NDHWC),
filter (KTRSC),
output (NZPQK),
      padding (pad_d, pad_h, pad_w),
stride (stride_d, stride_h, stride_w),
dilation (dilation_d, dilation_h, dilation_w).
Free functions to map:
Map tensor extents (Conv3d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_extent(ConvolutionOperator)
Map tensor sizes (Conv3d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_size(ConvolutionOperator)
Map tensor problem sizes (Conv3d -> ImplicitGemm): implicit_gemm_problem_size(ConvolutionOperator)
*/
#pragma once
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Problem size structure
struct Conv3dProblemSize : public Conv2dProblemSize {
//
// Type definitions
//
// 3D coordinate for padding, stride, and dilation in (d, h, w) dimensions
using Coord3D = Coord<3>;
//
// Data members
//
  // Parameters that strictly define the Conv3d problem size
int D, T, Z; // input depth, filter depth, output depth
int pad_d; // padding in depth dimension
int stride_d; // stride in depth dimension
int dilation_d; // dilation in depth dimension
//
// Methods
//
public:
CUTLASS_HOST_DEVICE
Conv3dProblemSize():
D(0), T(0), Z(0),
pad_d(0),
stride_d(1),
dilation_d(1),
Conv2dProblemSize() { }
/// Constructor for default padding, stride, dilation, and split-K
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
int N,
int D,
int H,
int W,
int C,
int Z,
int P,
int Q,
int K,
int T,
int R,
int S,
Mode mode
):
D(D), T(T), Z(Z),
pad_d(T / 2), stride_d(1), dilation_d(1),
Conv2dProblemSize(N, H, W, C, P, Q, K, R, S, mode) { }
/// Constructor
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
int N,
int D,
int H,
int W,
int C,
int K,
int T,
int R,
int S,
int Z,
int P,
int Q,
int pad_d,
int pad_h,
int pad_w,
int stride_d,
int stride_h,
int stride_w,
int dilation_d,
int dilation_h,
int dilation_w,
Mode mode,
int split_k_slices = 1,
int groups = 1
):
D(D), T(T), Z(Z),
pad_d(pad_d), stride_d(stride_d), dilation_d(dilation_d),
Conv2dProblemSize(
N, H, W, C, K, R, S, P, Q,
pad_h, pad_w,
stride_h, stride_w,
dilation_h, dilation_w,
mode, split_k_slices, groups) { }
/// Constructs convolution problem size from cutlass Tensor5DCoord and Coord3D
  // sets Z, P, and Q from the *user-defined* output size (all data members included in ctor)
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
cutlass::Tensor5DCoord input_size, // NDHWC
cutlass::Tensor5DCoord filter_size, // KTRSC
Coord3D padding, // pad_d, pad_h, pad_w
Coord3D stride, // stride_d, stride_h, stride_w
Coord3D dilation, // dilation_d, dilation_h, dilation_w
cutlass::Tensor5DCoord output_size, // NZPQK
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
D(input_size.d()), T(filter_size.d()), Z(output_size.d()),
pad_d(padding[0]), stride_d(stride[0]), dilation_d(dilation[0]),
Conv2dProblemSize(
{input_size.n(), input_size.h(), input_size.w(), input_size.c()},
{filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()},
{padding[1], padding[1], padding[2], padding[2]},
{stride[1], stride[2]},
{dilation[1], dilation[2]},
{output_size.n(), output_size.h(), output_size.w(), output_size.c()},
mode, split_k_slices, groups
) { }
/// Constructs convolution problem size from cutlass Tensor5DCoord and Coord3D
  // *computes* the output size and sets Z, P, and Q (output size omitted from ctor arguments)
CUTLASS_HOST_DEVICE
Conv3dProblemSize(
cutlass::Tensor5DCoord input_size, // NDHWC
cutlass::Tensor5DCoord filter_size, // KTRSC
Coord3D padding, // pad_d, pad_h, pad_w
Coord3D stride, // stride_d, stride_h, stride_w
Coord3D dilation, // dilation_d, dilation_h, dilation_w
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation,
int split_k_slices = 1,
int groups = 1
):
D(input_size.d()), T(filter_size.d()),
pad_d(padding[0]), stride_d(stride[0]), dilation_d(dilation[0]),
Conv2dProblemSize(
{input_size.n(), input_size.h(), input_size.w(), input_size.c()},
{filter_size.n(), filter_size.h(), filter_size.w(), filter_size.c()},
{padding[1], padding[1], padding[2], padding[2]},
{stride[1], stride[2]},
{dilation[1], dilation[2]},
mode, split_k_slices, groups
) {
// set output Z
Z = ((D + pad_d * 2 - T * dilation_d) / stride_d) + 1;
}
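  // Worked example (illustrative, unit dilation): D = 16, T = 3, pad_d = 1, stride_d = 1
  // gives Z = ((16 + 2*1 - 3*1) / 1) + 1 = 16; P and Q are derived the same way by the
  // Conv2dProblemSize base constructor.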
/// Equality operator (ignores mode and split_k_slice)
CUTLASS_HOST_DEVICE
bool operator==(Conv3dProblemSize const &conv) const {
return (
(N == conv.N) && (D == conv.D) && (H == conv.H) && (W == conv.W) && (C == conv.C) &&
(K == conv.K) && (T == conv.T) && (R == conv.R) && (S == conv.S) &&
(Z == conv.Z) &&(P == conv.P) && (Q == conv.Q) &&
(pad_d == conv.pad_d) && (pad_h == conv.pad_h) && (pad_w == conv.pad_w) &&
(stride_d == conv.stride_d) && (stride_h == conv.stride_h) && (stride_w == conv.stride_w) &&
(dilation_d == conv.dilation_d) && (dilation_h == conv.dilation_h) && (dilation_w == conv.dilation_w)
);
}
/// Inequality operator
CUTLASS_HOST_DEVICE
bool operator!=(Conv3dProblemSize const &rhs) const {
return !(*this == rhs);
}
  // Reset convolution mode in the problem
CUTLASS_HOST_DEVICE
Conv3dProblemSize reset_mode(cutlass::conv::Mode mode_) {
Conv3dProblemSize tmp(*this);
tmp.mode = mode_;
return tmp;
}
  // Reset split-K slices in the problem
CUTLASS_HOST_DEVICE
Conv3dProblemSize reset_split_k_slices(int split_k_slices_) {
Conv3dProblemSize tmp(*this);
tmp.split_k_slices = split_k_slices_;
return tmp;
}
/// Returns activation extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord activation_extent() const {
return cutlass::Tensor5DCoord ({N, D, H, W, C});
}
/// Returns filter extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord filter_extent() const {
return cutlass::Tensor5DCoord ({K, T, R, S, C});
}
/// Returns output extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord output_extent() const {
return cutlass::Tensor5DCoord ({N, Z, P, Q, K});
}
/// Returns activation size in number of elements
CUTLASS_HOST_DEVICE
int64_t activation_size() const {
return (N * D * H * W * C);
}
/// Returns filter size in number of elements
CUTLASS_HOST_DEVICE
int64_t filter_size() const {
return (K * T * R * S * C);
}
/// Returns output size in number of elements
CUTLASS_HOST_DEVICE
int64_t output_size() const {
return (N * Z * P * Q * K);
}
  /// Returns padding as Coord3D
CUTLASS_HOST_DEVICE
Coord3D padding() const {
return Coord3D ({pad_d, pad_h, pad_w});
}
  /// Returns stride as Coord3D
CUTLASS_HOST_DEVICE
Coord3D stride() const {
return Coord3D ({stride_d, stride_h, stride_w});
}
/// Returns dilation as MatrixCoord
CUTLASS_HOST_DEVICE
Coord3D dilation() const {
return Coord3D ({dilation_d, dilation_h, dilation_w});
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// ImplicitGemm helper functions //
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Determine the problem size of the implicit GEMM operation
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord implicit_gemm_problem_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
// Compute problem size
switch (conv_operator) {
case Operator::kFprop:
return gemm::GemmCoord(
problem_size.N * problem_size.Z * problem_size.P * problem_size.Q,
problem_size.K,
problem_size.T * problem_size.R * problem_size.S * problem_size.C
);
case Operator::kDgrad:
return gemm::GemmCoord(
problem_size.N * problem_size.D * problem_size.H * problem_size.W,
problem_size.C,
problem_size.T * problem_size.R * problem_size.S * problem_size.K
);
case Operator::kWgrad:
return gemm::GemmCoord(
problem_size.K,
problem_size.T * problem_size.R * problem_size.S * problem_size.C,
problem_size.N * problem_size.Z * problem_size.P * problem_size.Q
);
default:
break;
}
return gemm::GemmCoord();
}
// Determine the number of gemm_k iterations for conv3d problem using implicit gemm algorithm
CUTLASS_HOST_DEVICE
int implicit_gemm_k_iterations(
Operator conv_operator,
int threadblock_K,
Conv3dProblemSize const &problem_size,
IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic,
GroupMode group_mode = GroupMode::kNone,
int threadblock_N = 0) {
int iterations = 0;
int elements_per_split_k_slice = 0;
if (group_mode == GroupMode::kNone) {
switch (conv_operator) {
case Operator::kFprop:
elements_per_split_k_slice = (problem_size.C + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.T * problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kDgrad:
elements_per_split_k_slice = (problem_size.K + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = problem_size.T * problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K);
break;
case Operator::kWgrad:
elements_per_split_k_slice = (problem_size.N * problem_size.Z * problem_size.P * problem_size.Q + problem_size.split_k_slices - 1) / problem_size.split_k_slices;
iterations = (elements_per_split_k_slice + threadblock_K - 1) / threadblock_K;
break;
default:
break;
}
} else if (group_mode == GroupMode::kDepthwise) {
int channels_per_cta = threadblock_N;
if (algorithm == IteratorAlgorithm::kAnalytic) {
switch (conv_operator) {
case Operator::kFprop:
iterations = problem_size.T * problem_size.R * problem_size.S *
((channels_per_cta + threadblock_K - 1) / threadblock_K);
break;
default:
break;
}
}
}
return iterations;
}
////////////////////////////////////////////////////////////////////////////////
// Mapping function (ImplicitGemm A, B, C -> Conv Activation, Filter, Output)
////////////////////////////////////////////////////////////////////////////////
/// Returns ImplicitGemm tensor A extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_a_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.output_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.output_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor B extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_b_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.filter_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor C extent as Tensor5DCoord
CUTLASS_HOST_DEVICE
cutlass::Tensor5DCoord implicit_gemm_tensor_c_extent(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_extent();
case cutlass::conv::Operator::kDgrad: return problem_size.activation_extent();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_extent();
default : break;
}
return cutlass::Tensor5DCoord();
}
/// Returns ImplicitGemm tensor A size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_a_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.activation_size();
case cutlass::conv::Operator::kDgrad: return problem_size.output_size();
case cutlass::conv::Operator::kWgrad: return problem_size.output_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor B size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_b_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.filter_size();
case cutlass::conv::Operator::kDgrad: return problem_size.filter_size();
case cutlass::conv::Operator::kWgrad: return problem_size.activation_size();
default : break;
}
return 0;
}
/// Returns ImplicitGemm tensor C size in number of elements
CUTLASS_HOST_DEVICE
int64_t implicit_gemm_tensor_c_size(
Operator conv_operator,
Conv3dProblemSize const &problem_size) {
switch (conv_operator) {
case cutlass::conv::Operator::kFprop: return problem_size.output_size();
case cutlass::conv::Operator::kDgrad: return problem_size.activation_size();
case cutlass::conv::Operator::kWgrad: return problem_size.filter_size();
default : break;
}
return 0;
}
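// Usage sketch (illustrative, not part of the original source; the Conv3dProblemSize
// construction below is hypothetical):
//
//   Conv3dProblemSize problem_size(/* NDHWC activation, KTRSC filter, padding, strides, dilations */);
//   auto extent_A = implicit_gemm_tensor_a_extent(Operator::kFprop, problem_size);  // activation extent
//   auto extent_B = implicit_gemm_tensor_b_extent(Operator::kFprop, problem_size);  // filter extent
//   auto extent_C = implicit_gemm_tensor_c_extent(Operator::kFprop, problem_size);  // output extent
//   int64_t elements_A = implicit_gemm_tensor_a_size(Operator::kFprop, problem_size);  // element count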
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 16,292 | C | 33.085774 | 169 | 0.624969 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/convolution.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
This file contains definitions and utility functions for describing convolution problem sizes in terms of
activation (NHWC), filter (KRSC), output (NPQK), padding (pad_h, pad_w), stride (stride_h, stride_w),
dilation (dilation_h, dilation_w). Furthermore, it defines helper functions to map cutlass' implicit gemm
tensor extents, sizes, data types to that of convolutions extents, sizes, and data types.
* Mapping convolutions to Gemm computation *
Cutlass employs the ImplicitGemm algorithm to implement convolutions. The ImplicitGemm algorithm runs a gemm operation
on the convolution tensors Activation, Filter, and Output. The underlying gemm operation follows the standard
gemm definition:
C = A * B + C
A and B are input matrices
C is source and output matrix
For the three convolutional operators (Fprop, Dgrad, Wgrad), ImplicitGemm matrices A, B, and C are mapped on
to convolution tensors Activation, Filter and Output as per the below table:
___________________________________________________________________________
ConvolutionalOperator | A | B | C
___________________________________________________________________________
| | | | |
| Fprop | Activation | Filter | Output |
| Dgrad | Output | Filter | Activation |
| Wgrad | Output | Activation | Filter |
___________________________________________________________________________
In the convolution codebase, DO NOT mix using (A, B, C) with (Activation, Filter, Output).
For example, a convolution class/function taking A, B, Output is confusing and error-prone. Instead, use the below
mapping functions and adhere to using either A, B, C or Activation, Filter, Output.
Map elements' data types (ImplicitGemm -> Conv): GemmToConvElementMap
Map elements' data types (Conv -> ImplicitGemm): ConvToGemmElementMap
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
namespace cutlass {
namespace conv {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Convolutional operator
enum class Operator {
kFprop,
kDgrad,
kWgrad
};
/// Distinguishes convolution from cross correlation
enum class Mode {
kCrossCorrelation,
kConvolution
};
/// Selects among several implementation variants trading off performance with simplicity
enum class IteratorAlgorithm {
kAnalytic, ///< functionally correct in all cases but lower performance
kOptimized, ///< optimized for R <= 32, S <= 32 and unity-stride dgrad
kFixedChannels, ///< Analytic algorithm optimized for fixed channel count (C == AccessSize)
kFewChannels, ///< Analytic algorithm optimized for few channels (C divisible by AccessSize)
kFixedStrideDilation ///< Optimized for fixed stride and dilation
};
/// Distinguishes among partial specializations that accelerate certain problems where convolution
/// stride is unit.
enum class StrideSupport {
kStrided, ///< arbitrary convolution stride
kUnity, ///< unit convolution stride
kFixed ///< fixed convolution stride
};
/// Identifies split-K mode
enum class SplitKMode {
kNone,
kSerial,
kParallel
};
/// Identifies group mode
enum class GroupMode {
kNone,
kSingleGroup, ///< One CTA calculates one group or less
kMultipleGroup, ///< One CTA calculates multiple groups
kDepthwise ///< One CTA calculates cta_n groups (problem_size.C == problem_size.K == problem_size.groups)
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Shape of a tensor
template <
int N = 1,
int H = 1,
int W = 1,
int C = 1
>
struct TensorNHWCShape {
static int const kN = N;
static int const kH = H;
static int const kW = W;
static int const kC = C;
static int const kHW = H * W;
static int const kNHW = N * kHW;
static int const kNHWC = N * H * W * C;
static int const kCount = kNHWC;
//
// Static member functions
//
/// Returns a Coord object
CUTLASS_HOST_DEVICE
static Coord<4> toCoord() {
return make_Coord(kN, kH, kW, kC);
}
};
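// Example (illustrative, not part of the original source): a threadblock output tile of
// 1x8x8x64 can be described as
//
//   using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, 64>;
//
// for which kNHWC == 4096 and toCoord() returns make_Coord(1, 8, 8, 64).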
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 6,664 | C | 38.672619 | 112 | 0.596789 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/warp/mma_depthwise_simt.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/thread/depthwise_mma.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK = 1,
/// Complex transformation on operand A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transformation on operand B
ComplexTransform TransformB = ComplexTransform::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaDepthwiseSimt
: public cutlass::gemm::warp::
MmaSimt<Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, Policy_> {
using Base = cutlass::gemm::warp::
MmaSimt<Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, Policy_>;
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassSimt;
/// Hard-coded for now
using ArchTag = arch::Sm50;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
public:
/// Iterates over the B operand in memory
using IteratorB = cutlass::conv::warp::DepthwiseMmaSimtTileIterator<
MatrixShape<Policy::LaneMmaShape::kK, Shape::kN>,
cutlass::gemm::Operand::kB,
ElementB,
LayoutB,
Policy,
PartitionsK,
Shape::kK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaDepthwiseSimt():Base() {}
};
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
typename FilterShape_,
/// Shape of the output tile computed by thread- concept: conv::TensorNHWCShape<>
typename ThreadOutputShape_,
/// Shape of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
typename ThreadBlockOutputShape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm_ = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_ = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_ = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock
typename ActivationShape_ = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Number of partitions along K dimension
int PartitionsK = 1,
/// Complex transformation on operand A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transformation on operand B
ComplexTransform TransformB = ComplexTransform::kNone,
/// Used for partial specialization
typename Enable = bool>
class MmaDepthwiseDirectConvSimt {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
using FilterShape = FilterShape_;
/// Shape of the output tile computed by thread- concept: conv::TensorNHWCShape<>
using ThreadOutputShape = ThreadOutputShape_;
/// Shape of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Iterator algo type
static conv::IteratorAlgorithm const IteratorAlgorithm = IteratorAlgorithm_;
/// Stride ( MatrixShape<Height, Width> )
using StrideShape = StrideShape_;
/// Dilation ( MatrixShape<Height, Width> )
using DilationShape = DilationShape_;
/// Activation Shape loaded by threadblock
using ActivationShape = ActivationShape_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassSimt;
/// Hard-coded for now
using ArchTag = arch::Sm50;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
static constexpr bool use_dp4a = (platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA>::value ||
platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value) &&
platform::is_same< ElementA, int8_t >::value &&
platform::is_same< ElementB, int8_t >::value;
using dp4a_type = typename platform::conditional< use_dp4a , int8_t, bool >::type;
/// Thread-level matrix multiply accumulate operator
using ThreadMma = cutlass::conv::thread::DepthwiseDirectConvElementwiseInnerProduct<
cutlass::gemm::GemmShape<
Shape::kM / Policy::WarpShape::kRow, // number of output pixels processed per thread
Shape::kN / Policy::WarpShape::kColumn, // number of channels processed per thread
1>,
ElementA,
ElementB,
ElementC,
arch::OpMultiplyAdd,
dp4a_type
>;
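// Illustrative sizing (assumption, not taken from the original source): with a warp-level
// Shape of 16x64 and a Policy::WarpShape of 4x8 threads (32 threads per warp), each thread
// computes 16 / 4 = 4 output pixels and 64 / 8 = 8 channels in this thread-level MMA.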
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename ThreadMma::ArchMmaOperator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Shape of the underlying instruction
using InstructionShape = cutlass::gemm::GemmShape<1,1,use_dp4a ? 4 : 1>;
public:
/// Iterates over the A operand in memory
using IteratorA = cutlass::conv::warp::DepthwiseDirect2dConvSimtTileIterator<
MatrixShape<Shape::kM, Shape::kN>, // <output tile=(P*Q), output channels> per warp
FilterShape,
ThreadOutputShape,
ThreadBlockOutputShape,
cutlass::gemm::Operand::kA,
ElementA,
Policy,
IteratorAlgorithm,
StrideShape,
DilationShape,
ActivationShape,
PartitionsK,
Shape::kK
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = cutlass::gemm::warp::MmaSimtTileIterator<
MatrixShape<1, Shape::kN>,
cutlass::gemm::Operand::kB,
ElementB,
LayoutB,
Policy,
PartitionsK,
Shape::kK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
/// Iterates over the C operand in memory
using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
cutlass::gemm::Operand::kC,
ElementC,
LayoutC,
Policy
>;
/// Storage for C tile
using FragmentC = typename ThreadMma::FragmentC;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaDepthwiseDirectConvSimt() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &d,
FragmentA a,
FragmentB b,
FragmentC const &c, int group_idx = 0) const {
ThreadMma mma;
mma(d, a, b, c);
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//TODO: Implement this
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace conv
} // namespace cutlass
| 12,419 | C | 31.513089 | 109 | 0.674209 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Describes the tile iterators used by warp-level matrix multiply operators targeting SIMT
instructions
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over operands to warp-level matrix multiply operations targeting SIMT instructions
///
/// concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
cutlass::gemm::Operand Operand,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK = 1,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize = 1
>
class DepthwiseMmaSimtTileIterator;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for B operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of B elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseMmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize>
: public cutlass::gemm::warp::MmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize> {
using Base = cutlass::gemm::warp::MmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize>;
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kB;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = typename Base::TensorRef;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Thread-level shape of a fragment
using ThreadShape = typename Base::ThreadShape;
/// Number of individual loads
using Iterations = typename Base::Iterations;
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
static_assert(Policy::LaneMmaShape::kN == 1, "Each thread should be 1 element per LDS along the k-dim");
private:
MatrixCoord lane_offset_;
int channel_idx_;
int base_channel_idx_;
int warps_n_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator():Base() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator(
TensorRef ref,
int lane_id
) : Base(ref, lane_id) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
warps_n_ = -1;
channel_idx_ = 0;
base_channel_idx_ = 0;
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
if(warps_n_ == -1){
warps_n_ = coord.column();
}
Base::add_tile_offset(coord);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
void const *ptr = this->ref_.data() +
this->ref_.offset({-(channel_idx_ - base_channel_idx_),
n * Policy::WarpShape::kColumn}) +
pointer_offset / Policy::LaneMmaShape::kN;
// Base_k of a warp + Base_k of current threads.
int thread_k_base_idx =
warps_n_ * Shape::kColumn / Policy::LaneMmaShape::kN + lane_offset_.column();
if (channel_idx_ + k == thread_k_base_idx + n * Policy::WarpShape::kColumn) {
// Depthwise kernel would only do computation when channel == k.
// Loads an element when the current computation channel == the k corresponding to this thread.
arch::shared_load(dst_ptr[n + k * Iterations::kColumn], ptr);
} else {
// Reduce SMEM load
dst_ptr[n + k * Iterations::kColumn].fill(Element(0));
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
if(k_group % PartitionGroupSize == 0 && k_group != 0){
base_channel_idx_ = k_group;
}
channel_idx_ = k_group;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: MatrixShape)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: MatrixShape)
typename ThreadBlockOutputShape_,
/// Operand identity
cutlass::gemm::Operand Operand,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock
typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK = 1,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize = 1>
class DepthwiseDirect2dConvSimtTileIterator;
/// Specialization for A operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadBlockOutputShape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape,
/// Activation Shape loaded by threadblock
typename ActivationShape,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseDirect2dConvSimtTileIterator<Shape_,
FilterShape_,
ThreadOutputShape_,
ThreadBlockOutputShape_,
cutlass::gemm::Operand::kA,
Element_,
Policy_,
IteratorAlgorithm,
StrideShape,
DilationShape,
ActivationShape,
PartitionsK,
PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of filter (concept: gemm::GemmShape<Depth, Height, Width>)
using FilterShape = FilterShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadOutputShape = ThreadOutputShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
ThreadOutputShape::kNHW, // Output tile shape Computed by current threads
ThreadOutputShape::kC
>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow,
ThreadShape::kColumn / Policy::LaneMmaShape::kN
>;
using ThreadTileCount = MatrixShape<
ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
protected:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_;
int activation_offset[ThreadOutputShape::kH][ThreadOutputShape::kW][Iterations::kColumn];
int iterator_r_;
int iterator_s_;
int iterator_offset_;
int inc_next_s_ ;
int inc_next_r_ ;
MatrixCoord lane_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator(
TensorRef ref,
int lane_id
) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
// Set channel offset
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
ref.add_coord_offset(lane_offset_);
ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kN);
iterator_r_ = 0;
iterator_s_ = 0;
iterator_offset_ = 0;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
template<typename Params>
CUTLASS_HOST_DEVICE
void setup_initial_status(Params const& params) {
inc_next_s_ = params.inc_next[0];
inc_next_r_ = params.inc_next[1];
// Get base HW offset of current threads
int threadgroup = threadIdx.x / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
int base_p_ =
(threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH;
int base_q_ =
(threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW;
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int base_w = (base_q_ + q) * params.stride[0];
int base_h = (base_p_ + p) * params.stride[1];
int offset = base_h * params.activation_tile_w + base_w;
activation_offset[p][q][col] = offset;
}
}
}
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
// Set warp row and col start
lane_offset_ = MatrixCoord({lane_offset_.row() + coord.row() * Shape::kRow, lane_offset_.column()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
void advance(int32_t pointer_offset) {
ref_.reset(ref_.data() + pointer_offset / sizeof(Element) / Policy::LaneMmaShape::kN);
iterator_s_ = 0;
iterator_r_ = 0;
iterator_offset_ = 0;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator++() {
++iterator_s_;
if (iterator_s_ < FilterShape::kColumn) {
iterator_offset_ += inc_next_s_;
return *this;
}
iterator_s_ = 0;
++iterator_r_;
if (iterator_r_ < FilterShape::kRow) {
iterator_offset_ += inc_next_r_;
return *this;
}
iterator_r_ = 0;
iterator_offset_ = 0;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator & operator--() {
// Do nothing
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
void const *ptr = ref_.data() +
ref_.offset({activation_offset[p][q][n] + (iterator_offset_),
n * Policy::WarpShape::kColumn}) +
pointer_offset / Policy::LaneMmaShape::kN;
arch::shared_load(dst_ptr[n + q + p * ThreadOutputShape::kW], ptr);
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
// Do nothing at present.
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, Index pointer_offset) const {
store_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for A operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadBlockOutputShape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_,
/// Activation Shape loaded by threadblock
typename ActivationShape_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseDirect2dConvSimtTileIterator<Shape_,
FilterShape_,
ThreadOutputShape_,
ThreadBlockOutputShape_,
cutlass::gemm::Operand::kA,
Element_,
Policy_,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape_,
DilationShape_,
ActivationShape_,
PartitionsK,
PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of filter (concept: gemm::GemmShape<Depth, Height, Width>)
using FilterShape = FilterShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadOutputShape = ThreadOutputShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
/// Stride ( MatrixShape<Height, Width> )
using StrideShape = StrideShape_;
/// Dilation ( MatrixShape<Height, Width> )
using DilationShape = DilationShape_;
/// Activation Shape loaded by threadblock
using ActivationShape = ActivationShape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged "
"along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0,
"Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
// Activations loaded by threadblock
static int const ThreadActivationShapeH = (ThreadOutputShape::kH - 1) * StrideShape::kRow +
(FilterShape::kRow - 1) * DilationShape::kRow + 1;
static int const ThreadActivationShapeW = (ThreadOutputShape::kW - 1) * StrideShape::kColumn +
(FilterShape::kColumn - 1) * DilationShape::kColumn + 1;
using ThreadActivationShape = cutlass::conv::
TensorNHWCShape<1, ThreadActivationShapeH, ThreadActivationShapeW, ThreadOutputShape::kC>;
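// Worked example (illustrative, not part of the original source): for a ThreadOutputShape of
// 1x2x2xC, a 3x3 filter, stride 1x1, and dilation 1x1, each thread needs an activation tile of
// (2-1)*1 + (3-1)*1 + 1 = 4 rows and 4 columns, i.e. ThreadActivationShape = TensorNHWCShape<1, 4, 4, C>.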
// Thread-level shape of a fragment
using ThreadShape =
MatrixShape<ThreadOutputShape::kNHW,
ThreadOutputShape::kC>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations =
MatrixShape<ThreadShape::kRow, ThreadShape::kColumn / Policy::LaneMmaShape::kN>;
using ThreadTileCount = MatrixShape<ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
protected:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_;
Array<Element, Policy::LaneMmaShape::kN>
activation[ThreadActivationShape::kH][ThreadActivationShape::kW][Iterations::kColumn];
int iterator_r_;
int iterator_s_;
MatrixCoord lane_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator(TensorRef ref, int lane_id) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
// Set channel offset
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
ref.add_coord_offset(lane_offset_);
ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kN);
iterator_r_ = 0;
iterator_s_ = 0;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
template <typename Params>
CUTLASS_HOST_DEVICE void setup_initial_status(
Params const ¶ms) {
// Get base HW offset of current threads
int threadgroup = threadIdx.x / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
int base_h =
(threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH * StrideShape::kRow;
int base_w =
(threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW * StrideShape::kColumn;
CUTLASS_PRAGMA_UNROLL
for (int h = 0; h < ThreadActivationShape::kH; ++h) {
CUTLASS_PRAGMA_UNROLL
for (int w = 0; w < ThreadActivationShape::kW; ++w) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int offset = (base_h + h) * ActivationShape::kW + (base_w + w);
void const *ptr = ref_.data() + ref_.offset({offset, col * Policy::WarpShape::kColumn});
arch::shared_load(activation[h][w][col], ptr);
}
}
}
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
// Set warp row and col start
lane_offset_ =
MatrixCoord({lane_offset_.row() + coord.row() * Shape::kRow, lane_offset_.column()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
void advance(int32_t pointer_offset) {
ref_.reset(ref_.data() + pointer_offset / sizeof(Element) / Policy::LaneMmaShape::kN);
iterator_s_ = 0;
iterator_r_ = 0;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator++() {
++iterator_s_;
if (iterator_s_ < FilterShape::kColumn) {
return *this;
}
iterator_s_ = 0;
++iterator_r_;
if (iterator_r_ < FilterShape::kRow) {
return *this;
}
iterator_r_ = 0;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator--() {
// Do nothing
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
const int h = p * StrideShape::kRow + iterator_r_ * DilationShape::kRow;
const int w = q * StrideShape::kColumn + iterator_s_ * DilationShape::kColumn;
dst_ptr[n + q + p * ThreadOutputShape::kW] = activation[h][w][n];
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); }
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
// Do nothing at present.
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, Index pointer_offset) const {
store_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
} // namespace warp
} // namespace conv
} // namespace cutlass
| 30,655 | C | 34.522596 | 127 | 0.625999 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/warp/scale_bias_relu_transform.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level per channel scale+bias+relu before
matrix multiply-accumulate operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentScaleBias>
struct FpropScaleBiasReluTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumScaleBias = FragmentScaleBias::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 columns
static int const MmaCols = 2;
using MmaOperand = Array<T, MmaElements>;
using ScaleBiasOperand = Array<T, MmaElements * MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations, ScaleBiasOperand const &scale_bias) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations);
uint32_t const *ptr_scale_bias = reinterpret_cast<uint32_t const *>(&scale_bias);
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
// We assume the pair of FP16 values are either both in-bound or both out-of-bound.
// It requires C to be an even number.
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" .reg .b32 t1;\n\t"
" setp.eq.u32 %%p, %2, %4;\n\t"
" fma.rn.f16x2.relu t1, %1, %2, %3;\n"
" selp.u32 %0, 0, t1, %%p;\n\t"
"}\n"
: "=r"(ptr_activations[0])
: "r"(ptr_scale_bias[0]), "r"(ptr_activations[0]),
"r"(ptr_scale_bias[1]), "n"(cutlass::arch::OOB_NAN_F16x2));
#else
// TODO: write emulation code
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentScaleBias const &scale_bias) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
ScaleBiasOperand const *ptr_scale_bias =
reinterpret_cast<ScaleBiasOperand const *>(&scale_bias);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i], ptr_scale_bias[(i / MmaScaleBiasPair) % MmaCols]);
}
}
};
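// Scalar equivalent of the transform above (illustrative sketch, not part of the original
// source): for each half-precision activation x with per-channel scale s and bias b,
//
//   y = (x == special NaN 0x7eff, i.e. cutlass::arch::OOB_NAN_F16) ? 0 : max(s * x + b, 0);
//
// i.e. out-of-bound elements marked with the special NaN are forced to zero, and all other
// elements receive the fused scale+bias+ReLU.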
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentScaleBias>
struct WgradScaleBiasReluTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumScaleBias = FragmentScaleBias::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 rows
static int const MmaRows = 2;
using MmaOperand = Array<T, MmaElements>;
using ScaleBiasOperand = Array<__half2, MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations, ScaleBiasOperand const &scale_bias) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
__half2 *ptr_activations = reinterpret_cast<__half2 *>(&activations);
uint32_t const *ptr_scale_bias = reinterpret_cast<uint32_t const *>(&scale_bias);
#if 1
// CUDA + PTX version
bool h1_oob = (reinterpret_cast<uint16_t &>(ptr_activations[0].x) == cutlass::arch::OOB_NAN_F16);
bool h2_oob = (reinterpret_cast<uint16_t &>(ptr_activations[0].y) == cutlass::arch::OOB_NAN_F16);
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
// We cannot guarantee that the pair of FP16 values are both in-bound or both
// out-of-bound because C x R x S can be an odd number.
asm volatile(
"{\n\t"
" fma.rn.f16x2.relu %0, %1, %2, %3;\n"
"}"
: "=r"(reinterpret_cast<uint32_t &>(ptr_activations[0]))
: "r"(ptr_scale_bias[0]), "r"(reinterpret_cast<uint32_t &>(ptr_activations[0])),
"r"(ptr_scale_bias[1]));
reinterpret_cast<uint32_t &>(ptr_activations[0]) = h1_oob ?
(reinterpret_cast<uint32_t &>(ptr_activations[0]) & 0xffff0000) :
reinterpret_cast<uint32_t &>(ptr_activations[0]);
reinterpret_cast<uint32_t &>(ptr_activations[0]) = h2_oob ?
(reinterpret_cast<uint32_t &>(ptr_activations[0]) & 0xffff) :
reinterpret_cast<uint32_t &>(ptr_activations[0]);
#else
// pure PTX version
// Apply per channel scale+bias+relu if the data is not a special NaN
// (0x7eff). If it is a special NaN (0x7eff), hard code the output to 0.
asm volatile(
"{\n"
" .reg .b16 t1, t2;\n"
" .reg .b32 t3, t4, t5, t6;\n"
" .reg .pred p1, p2;\n"
" mov.b32 {t1, t2}, %2;\n"
" setp.eq.s16 p1, t1, %4;\n"
" setp.eq.s16 p2, t2, %4;\n"
" fma.rn.f16x2.relu t3, %1, %2, %3;\n"
" and.b32 t4, t3, %5;\n"
" selp.b32 t5, t4, t3, p1;\n"
" and.b32 t6, t5, %6;\n"
" selp.b32 %0, t6, t5, p2;\n"
"}\n"
: "=r"(reinterpret_cast<uint32_t &>(ptr_activations[0]))
: "r"(ptr_scale_bias[0]), "r"(reinterpret_cast<uint32_t &>(ptr_activations[0])),
"r"(ptr_scale_bias[1]), "n"(cutlass::arch::OOB_NAN_F16), "n"(0xffff0000), "n"(0x0000ffff));
#endif
#else
// TODO: write emulation code
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentScaleBias const &scale_bias) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
ScaleBiasOperand const *ptr_scale_bias =
reinterpret_cast<ScaleBiasOperand const *>(&scale_bias);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i], ptr_scale_bias[(i / MmaRows)]);
}
}
};
} // namespace warp
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,772 | C | 38.165178 | 101 | 0.625627 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/thread/depthwise_mma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for depthwise convolution
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// MMA operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Data type of B elements
typename ElementB,
/// Element type of C matrix
typename ElementC,
/// Inner product operator
typename Operator
>
struct ElementwiseInnerProduct;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// General implementation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_>
struct ElementwiseInnerProduct<Shape_, 1, ElementA_, ElementB_, ElementC_, arch::OpMultiplyAdd> {
using Shape = Shape_;
using Operator = arch::OpMultiplyAdd;
using ElementC = ElementC_;
CUTLASS_HOST_DEVICE
void operator()(Array<ElementC_, Shape::kN> &d,
Array<ElementA_, Shape::kN> const &a,
Array<ElementB_, Shape::kN> const &b,
Array<ElementC_, Shape::kN> const &c) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Shape::kN; ++i) {
d[i] = a[i] * b[i] + c[i];
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for half_t
template <>
struct ElementwiseInnerProduct<
gemm::GemmShape<2, 2, 1>,
1,
half_t,
half_t,
half_t,
arch::OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = arch::OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 const & B = reinterpret_cast<__half2 const &>(b);
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 tmp_D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> const &>(tmp_D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[i] * b[i] + c[i];
}
#endif
}
};
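/// Usage sketch (illustrative only): instantiates the generic elementwise inner
/// product for 4-wide float operands and computes d[i] = a[i] * b[i] + c[i].
/// The function name and the chosen GemmShape are assumptions made for this
/// example; they are not part of the interfaces defined in this header.
CUTLASS_HOST_DEVICE
void elementwise_inner_product_example() {
  using Op = ElementwiseInnerProduct<
      gemm::GemmShape<1, 4, 1>,   // Shape::kN = 4 elements per operand
      1,                          // one participating thread
      float, float, float,
      arch::OpMultiplyAdd>;
  Array<float, 4> a, b, c, d;
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < 4; ++i) {
    a[i] = float(i);   // 0, 1, 2, 3
    b[i] = 2.0f;
    c[i] = 1.0f;
  }
  Op op;
  op(d, a, b, c);      // d = {1, 3, 5, 7}
}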
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Data type of A elements
typename ElementA,
/// Data type of B elements
typename ElementB,
/// Element type of C matrix
typename ElementC,
/// Concept: arch::OpMultiplyAdd or arch::Mma<>
typename Operator = arch::OpMultiplyAdd,
/// Used for partial specialization
typename Enable = bool
>
struct DepthwiseDirectConvElementwiseInnerProduct;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles all packed matrix layouts
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_,
/// Operator used to compute GEMM
typename Operator_
>
struct DepthwiseDirectConvElementwiseInnerProductGeneric {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Underlying mathematical operator
using Operator = Operator_;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMN>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Instruction
using MmaOp = cutlass::conv::thread::ElementwiseInnerProduct<
gemm::GemmShape<Shape::kN, Shape::kN, 1>,
1,
ElementA,
ElementB,
ElementC,
Operator>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
Array<ElementC, Shape::kN> *ptr_D = reinterpret_cast<Array<ElementC, Shape::kN> *>(&D);
Array<ElementA, Shape::kN> const *ptr_A =
reinterpret_cast<Array<ElementA, Shape::kN> const *>(&A);
Array<ElementB, Shape::kN> const *ptr_B =
reinterpret_cast<Array<ElementB, Shape::kN> const *>(&B);
MmaOp mma_op;
// Copy accumulators
D = C;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN / MmaOp::Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
Array<ElementC, MmaOp::Shape::kN> tmpD = ptr_D[m * Shape::kN / MmaOp::Shape::kN + n];
Array<ElementA, MmaOp::Shape::kN> tmpA = ptr_A[m * Shape::kN / MmaOp::Shape::kN + n];
Array<ElementB, MmaOp::Shape::kN> tmpB = ptr_B[n];
mma_op(tmpD, tmpA, tmpB, tmpD);
ptr_D[m * Shape::kN / MmaOp::Shape::kN + n] = tmpD;
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_
>
struct DepthwiseDirectConvElementwiseInnerProduct<
Shape_,
ElementA_,
ElementB_,
ElementC_,
arch::OpMultiplyAdd
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA =
Array<ElementA, Shape::kMN>; // output_tile_size per thread * groups_per_thread
/// B operand storage
using FragmentB = Array<ElementB, Shape::kN>; // 1 * groups_per_thread
/// C operand storage
using FragmentC =
Array<ElementC, Shape::kMN>; // output_tile_size per thread * groups_per_thread
static bool const use_optimized = 0;
using ArchMmaOperator = DepthwiseDirectConvElementwiseInnerProductGeneric<Shape,
ElementA,
ElementB,
ElementC,
Operator>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
ArchMmaOperator mma;
mma(D, A, B, C);
}
};
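/// Usage sketch (illustrative only): applies the depthwise elementwise product to
/// per-thread fragments. The 2x4 shape stands for 2 output-tile elements and 4
/// groups (channels) per thread; the function name and these numbers are
/// assumptions made for this example, not values mandated by this header.
CUTLASS_HOST_DEVICE
void depthwise_direct_conv_inner_product_example() {
  using Mma = DepthwiseDirectConvElementwiseInnerProduct<
      gemm::GemmShape<2, 4, 1>, half_t, half_t, half_t, arch::OpMultiplyAdd>;
  Mma::FragmentA a;   // Shape::kMN = 8 activations
  Mma::FragmentB b;   // Shape::kN  = 4 filter taps (one per group)
  Mma::FragmentC c;   // Shape::kMN = 8 accumulators
  Mma::FragmentC d;
  a.fill(half_t(1));
  b.fill(half_t(2));
  c.fill(half_t(0));
  Mma mma;
  mma(d, a, b, c);    // every element of d becomes 1 * 2 + 0 = 2
}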
} // namespace thread
} // namespace conv
} // namespace cutlass
| 9,689 | C | 28.723926 | 100 | 0.590154 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dFpropFilterTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dAnalyticParams<Layout>;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
int offset_k_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
    pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_c_ += Shape::kRow * problem_size_.split_k_slices;
}
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
return TensorCoord(k, filter_t_, filter_r_, filter_s_, filter_c_);
}
/// Returns true if the current coordinate is within the activations tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K &&
coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
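/// Traversal sketch (illustrative only): shows the access protocol this iterator
/// exposes to threadblock-scoped consumers. The helper name and the idea of one
/// pass of accesses followed by advance() are assumptions made for this example;
/// real tile iterators interleave these steps with shared-memory stores.
template <typename Iterator>
CUTLASS_HOST_DEVICE
void visit_filter_tile_once(
    typename Iterator::Params const &params,
    typename Iterator::ConvProblemSize const &problem_size,
    typename Iterator::Element const *ptr,
    int thread_idx) {
  Iterator iterator(params, problem_size, ptr, thread_idx);
  // One predicated access per (contiguous, strided) position of the thread map.
  int const kAccesses = Iterator::ThreadMap::Iterations::kContiguous *
                        Iterator::ThreadMap::Iterations::kStrided;
  CUTLASS_PRAGMA_UNROLL
  for (int idx = 0; idx < kAccesses; ++idx) {
    if (iterator.valid()) {
      typename Iterator::AccessType const *access = iterator.get();
      (void)access;   // a real consumer would issue the global-memory load here
    }
    ++iterator;
  }
  // Step to the next gemm-k tile: next filter position (s, then r, then t), then channels.
  iterator.advance();
}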
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,945 | C | 30.283464 | 107 | 0.654751 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized;
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradOutputGradientTileAccessIteratorOptimized strided dgrad needs special handling
// to skip MMAs (Dx = Dy * w) on invalid filter positions
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = uint64_t;
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or greater.");
//
  // Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dStridedDgradOutputGradientIteratorOptimizedParams;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
int filter_k_;
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int64_t reset_bytes_s_;
int64_t reset_bytes_r_;
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
reset_bytes_s_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0];
reset_bytes_r_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0] +
(problem_size_.num_gemm_k_filter_r(start_r_) - 1) * params_.inc_next[1];
int offset_n[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
int filter_r = filter_r_;
int filter_s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
filter_r = (problem_size_.R - 1 - filter_r);
filter_s = (problem_size_.S - 1 - filter_s);
}
// Starting h, w positions for filter position in gemm_k=0
int start_h, start_w;
strided_dgrad_starting_coords(
problem_size_,
stride_h_divmod, stride_w_divmod,
filter_r, filter_s,
start_h, start_w);
// Effective starting P and Q for filter position required for remapping NHW rows
int P = (problem_size_.H - start_h + problem_size_.stride_h - 1) / problem_size_.stride_h;
int Q = (problem_size_.W - start_w + problem_size_.stride_w - 1) / problem_size_.stride_w;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_npq = (threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided) % params_.tiled_rows_per_filter;
// (STEP 1) [reorder NHW rows to start with same filter positions]
offset_n[s] = offset_npq / (P * Q);
int residual = offset_npq % (P * Q);
int p = (residual / Q);
int q = (residual % Q);
int mapped_h = (start_h + p * problem_size_.stride_h);
int mapped_w = (start_w + q * problem_size_.stride_w);
// Access (p, q) coordinates for Dy tensor for filter position in gemm_k=0
// note that (h + pad_h - filter_r) and (w + pad_w - filter_s) are ensured to be
// divisible by stride_h and stride_w
offset_p[s] = (mapped_h + problem_size_.pad_h - filter_r) / problem_size_.stride_h;
offset_q[s] = (mapped_w + problem_size_.pad_w - filter_s) / problem_size_.stride_w;
      // Initialize pointers for gemm_k=0
TensorCoord coord{offset_n[s], offset_p[s], offset_q[s], filter_k_};
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
//
// Precompute mask predicates
//
clear_mask();
CUTLASS_PRAGMA_NO_UNROLL
for (int r = start_r; r < problem_size_.R; r += problem_size_.stride_h) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
        int p = offset_p[s_idx];
p += (params_.conv_sign * (r / problem_size_.stride_h));
bool pred = (offset_n[s_idx] < problem_size_.N && p >= 0 && p < problem_size_.P);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for(int s = start_s; s < problem_size_.S; s += problem_size_.stride_w) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int q = offset_q[s_idx];
q += (params_.conv_sign * (s / problem_size_.stride_w));
        bool pred = (q >= 0 && q < problem_size_.Q);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size.K);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn});
}
private:
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset, LongIndex byte_reset = 0) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset - byte_reset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void advance() {
int next_idx = 0;
int64_t reset_bytes = 0;
// Move filter_s by stride_w
filter_s_ += problem_size_.stride_w;
if (filter_s_ >= problem_size_.S) {
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r by stride_h
filter_r_ += problem_size_.stride_h;
#if 0
if (filter_r_ < problem_size_.R) {
next_idx = 1;
        // Restore bytes in q coordinate (Mma in filter s dimension)
reset_bytes = reset_bytes_s_;
} else {
// Restore filter_r
filter_r_ = start_r_;
next_idx = 2;
        // Restore bytes in p and q coordinate (Mma in filter s and r dimensions)
reset_bytes = reset_bytes_r_;
}
#else
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" setp.lt.s32 %%p, %3, %4;\n\t"
" selp.s32 %0, %3, %5, %%p;\n\t"
" selp.s32 %1, 1, 2, %%p;\n\t"
" selp.s64 %2, %6, %7, %%p;\n\t"
"}\n"
: "=r"(filter_r_), "=r"(next_idx), "=l"(reset_bytes)
: "r"(filter_r_), "r"(problem_size_.R), "r"(start_r_),
"l"(reset_bytes_s_), "l"(reset_bytes_r_));
#endif
}
// offset pointers by offset_bytes
add_byte_offset_(params_.inc_next[next_idx] - reset_bytes);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size_.K);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
/// Returns true if the current coordinate is within the output tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
// Limit on filter size
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
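/// Setup sketch (illustrative only): validates a strided-dgrad problem and builds
/// the precomputed Params consumed by the iterator constructor. The helper name is
/// an assumption made for this example; kernel launch and threadblock swizzling
/// are elided.
template <typename Iterator>
CUTLASS_HOST_DEVICE
Status prepare_dgrad_output_gradient_iterator(
    Conv2dProblemSize const &problem_size,
    typename Iterator::Layout const &layout) {
  // Rejects problems with misaligned K or filter extents beyond the mask range (R or S > 32).
  Status status = Iterator::can_implement(problem_size);
  if (status != Status::kSuccess) {
    return status;
  }
  typename Iterator::Params params = Iterator::getParams(problem_size, layout);
  (void)params;   // passed to the iterator constructor inside the kernel
  return Status::kSuccess;
}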
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradOutputGradientTileAccessIteratorOptimized unity stride dgrad is optimized for dgrad
// with problem stride = {1x1}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dDgradOutputGradientIteratorOptimizedParams;
private:
Conv2dDgradOutputGradientIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (r, s)
int filter_r_;
int filter_s_;
int filter_k_;
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized(
Conv2dDgradOutputGradientIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_h[ThreadMap::Iterations::kStrided];
int offset_w[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_nhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_nhw / (problem_size_.H * problem_size_.W);
// int residual = offset_nhw % (problem_size_.H * problem_size_.W);
//
// offset_h[s] = residual / problem_size_.W;
// offset_w[s] = residual % problem_size_.W;
//
int residual;
params_.hw_divmod(offset_n[s], residual, offset_nhw);
params_.w_divmod(offset_h[s], offset_w[s], residual);
TensorCoord coord = at_(offset_n[s], offset_h[s], offset_w[s], 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int p = offset_h[s_idx] + problem_size_.pad_h - r_ * problem_size_.dilation_h;
bool pred = (offset_n[s_idx] < problem_size_.N && p >= 0 && p < problem_size_.P);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int q = offset_w[s_idx] + problem_size_.pad_w - s_ * problem_size_.dilation_w;
bool pred = (q >= 0 && q < problem_size_.Q);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_k_ + v_idx * AccessType::kElements >= problem_size.K);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
  /// Returns the coordinate in the output gradient tensor dy that corresponds to
// activation nhw and filter position k, r, s
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int h, int w, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int p = h + problem_size_.pad_h - r * problem_size_.dilation_h;
int q = w + problem_size_.pad_w - s * problem_size_.dilation_w;
return TensorCoord(n, p, q, filter_k_);
}
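  /// Worked example for the mapping above (illustrative numbers, not taken from a
  /// specific problem): with unit stride, pad_h = 1, dilation_h = 1 and filter row
  /// r = 2, an activation row h = 0 maps to p = 0 + 1 - 2 = -1, so the predicate
  /// recorded in masks_ is false and the access is skipped; h = 3 maps to p = 2,
  /// which is loaded provided p < P. The q / filter column mapping is analogous.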
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
next_idx = 1;
}
else {
filter_r_ = 0;
next_idx = 2;
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, (filter_k_ + v_idx * AccessType::kElements) >= problem_size_.K);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? Mask(0) : masks_[s][v][0];
masks_[s][v][1] = clear ? Mask(0) : masks_[s][v][1];
}
}
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// This is specialized for unit stride
if (problem_size.stride() != MatrixCoord({1, 1})) {
return Status::kErrorNotSupported;
}
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorNotSupported;
}
// Limit on filter size
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 26,137 | C | 30.798053 | 140 | 0.6161 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kStrided
>
class Conv3dDgradOutputGradientTileAccessIteratorAnalytic;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv3dDgradOutputGradientTileAccessIteratorAnalytic strided dgrad needs special handling using
// unscaled coordinates
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dDgradOutputGradientTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or greater.");
//
  // Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
ConvProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_k_;
int filter_t_;
int filter_r_;
int filter_s_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_d_[ThreadMap::Iterations::kStrided];
int offset_w_[ThreadMap::Iterations::kStrided];
int offset_h_[ThreadMap::Iterations::kStrided];
private:
/// Returns the coordinate in the output tensor Dy that is currently pointed to
/// by the iterator but DOES NOT scale by the convolution stride. This is needed
/// to compute predicates in the valid() method. The return value of the public at()
/// method is correctly scaled.
CUTLASS_HOST_DEVICE
TensorCoord unscaled_at_() const {
int n = offset_n_[iteration_strided_];
int d = offset_d_[iteration_strided_];
int h = offset_h_[iteration_strided_];
int w = offset_w_[iteration_strided_];
int t = filter_t_;
int r = filter_r_;
int s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
t = (problem_size_.T - 1 - t);
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int z = (d + problem_size_.pad_d - t * problem_size_.dilation_d);
int p = (h + problem_size_.pad_h - r * problem_size_.dilation_h);
int q = (w + problem_size_.pad_w - s * problem_size_.dilation_w);
return TensorCoord(n, z, p, q, filter_k_);
}
public:
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_k_(0),
filter_t_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_ndhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
offset_n_[s] = offset_ndhw / (problem_size_.D * problem_size_.H * problem_size_.W);
int residual = offset_ndhw % (problem_size_.D * problem_size_.H * problem_size_.W);
offset_d_[s] = residual / (problem_size_.H * problem_size_.W);
residual = residual % (problem_size_.H * problem_size_.W);
offset_h_[s] = residual / problem_size_.W;
offset_w_[s] = residual % problem_size_.W;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// move to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_k_ += Shape_::kColumn * problem_size_.split_k_slices;
}
/// Returns the coordinate in the output tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
TensorCoord coord = unscaled_at_();
return TensorCoord(
coord.n(),
coord.d() / problem_size_.stride_d,
coord.h() / problem_size_.stride_h,
coord.w() / problem_size_.stride_w,
coord.c());
}
/// Returns true if the current coordinate is within the output tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord unscaled_coord = unscaled_at_();
TensorCoord coord = at();
return
!(unscaled_coord.d() % problem_size_.stride_d) &&
!(unscaled_coord.h() % problem_size_.stride_h) &&
!(unscaled_coord.w() % problem_size_.stride_w) &&
coord.n() < problem_size_.N &&
coord.d() >= 0 && coord.d() < problem_size_.Z &&
coord.h() >= 0 && coord.h() < problem_size_.P &&
coord.w() >= 0 && coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
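  /// Worked example for the divisibility checks above (illustrative numbers, not
  /// taken from a specific problem): with stride_h = 2, pad_h = 1, dilation_h = 1
  /// and filter offset r = 0, an activation row h = 3 gives an unscaled value of
  /// 3 + 1 - 0 = 4, which is divisible by the stride, so the scaled row p = 2 is
  /// loaded (subject to 0 <= p < P); h = 4 gives 5, which is not divisible, so
  /// valid() returns false and that output-gradient element does not contribute.
  /// The depth (d/t) and width (w/s) dimensions follow the same rule.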
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,020 | C | 31.037791 | 111 | 0.641742 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradFilterTileAccessIteratorAnalytic;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorAnalytic strided dgrad needs special handling to skip MMAs
// on non-contributing w positions
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or larger.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
  // For a fixed filter position (r, s), find and fill offset_k_ and offset_c_ in the strided and contiguous dimensions
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int offset_k_[ThreadMap::Iterations::kStrided];
int offset_c_[ThreadMap::Iterations::kContiguous];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
offset_c_[c] = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] =
threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// Moves filter_s
filter_s_ += problem_size_.stride_w;
if (filter_s_ < problem_size_.S) {
return;
}
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r
filter_r_ += problem_size_.stride_h;
if (filter_r_ < problem_size_.R) {
return;
}
// Restore filter_r
filter_r_ = start_r_;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the filter tensor w that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
int c = offset_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(k, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor w
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
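// Worked example for the (start_r, start_s) partitioning above (illustrative
// numbers): for stride = {2, 2} and a 3x3 filter, the four starting offsets
// (0,0), (0,1), (1,0) and (1,1) split the filter taps into disjoint subsets: the
// iterator built with (0,0) visits r in {0, 2} and s in {0, 2}, the one built
// with (0,1) visits r in {0, 2} and s in {1}, and so on, because advance() steps
// filter_s_ by stride_w and filter_r_ by stride_h from the given offsets.
// Together the four iterators cover every filter position exactly once.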
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorAnalytic unity strided dgrad is more performant for dgrad
// on problem sizes with stride = {1x1}
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
>{
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or larger.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
// For a fixed filter position (r, s), offset_k_ and offset_c_ hold the thread's offsets in the strided and contiguous dimensions
int filter_r_;
int filter_s_;
int offset_k_[ThreadMap::Iterations::kStrided];
int offset_c_[ThreadMap::Iterations::kContiguous];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
offset_c_[c] = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] =
threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
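/// Advances to the next filter position (r, s) in unit steps; after the last position, advances the K
/// offsets by a CTA tile.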
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the filter tensor w that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
int c = offset_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(k, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor w
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,306 | C | 30.495885 | 110 | 0.664576 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropFilterTileAccessIteratorOptimized{
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params : Conv2dFpropFilterIteratorOptimizedParams<Layout> {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dFpropFilterIteratorOptimizedParams<Layout> const &base):
Conv2dFpropFilterIteratorOptimizedParams<Layout>(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dFpropFilterIteratorOptimizedParams<Layout>(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) {
}
};
private:
Conv2dFpropFilterIteratorOptimizedParams<Layout> const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_rs_;
int filter_c_;
int channels_per_group_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorOptimized(
Conv2dFpropFilterIteratorOptimizedParams<Layout> const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_rs_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
Index column = threadblock_offset.column() + thread_coord.strided();
channels_per_group_ = problem_size_.C / problem_size_.groups;
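// Bit s of predicates_[v] records whether the K coordinate of strided iteration s is in range;
// the same K predicate is replicated across the access vectors.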
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
uint32_t pred = ((column + s * ThreadMap::Delta::kStrided < problem_size_.K) ? 1u : 0);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
predicates_[v_idx] |= (pred << s);
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= channels_per_group_);
}
pointer_ += (
params_.layout({filter_c_, column})
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
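/// Advances to the next filter position (r, s) using precomputed pointer increments; after the last
/// position, steps to the next channel tile and updates the channel predicates.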
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_rs;
// moves to the next tile
++filter_rs_;
if (filter_rs_ == params_.RS) {
filter_rs_ = 0;
next = params_.inc_next_c;
filter_c_ += params_.filter_c_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= channels_per_group_);
}
pointer_ += next;
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
predicates_[v] = clear ? 0u : predicates_[v];
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
return (predicates_[iteration_vector_] & (1u << iteration_strided_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_k;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) {
if (problem_size.K % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) {
if (problem_size.K % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,387 | C | 31.666667 | 105 | 0.660441 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of scale and bias vectors.
This iterator uses masks to guard out-of-bounds accesses.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorAccessIterator
///
template <typename ThreadblockShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorAccessIterator for fprop pitch-linear data.
///
template <typename ThreadblockShape_, typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::PitchLinear> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess;
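// Threads [0, kThreads) load the scale vector and threads [kThreads, 2 * kThreads) load the bias
// vector; any remaining threads are predicated off in valid().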
using AccessType = AlignedArray<Element, kElementsPerAccess>;
using Params = PredicatedScaleBiasVectorAccessIteratorParams;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
int problem_size_trs;
int problem_size_c;
int filter_trs_;
TensorCoord thread_offset_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
problem_size_trs(problem_size.R * problem_size.S),
problem_size_c(problem_size.C),
filter_trs_(0) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv3dProblemSize const &problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
problem_size_trs(problem_size.T * problem_size.R * problem_size.S),
problem_size_c(problem_size.C),
filter_trs_(0) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
/// Construct a PredicatedScaleBiasVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
/// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv3dProblemSize const &problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
/// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ =
thread_offset_ +
TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
(thread_offset_.contiguous() * sizeof_bits<Element>::value / 8));
}
/// Increments and returns a reference to self.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
return *this;
}
/// Advances the iterator; after the last filter position, moves to the next tile.
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_trs_;
if (filter_trs_ == problem_size_trs) {
filter_trs_ = 0;
add_tile_offset(TensorCoord(1, 0));
}
}
/// Post-increments and returns a copy of the iterator.
CUTLASS_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
uint32_t enabled = 0;
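// 'enabled' is nonzero only when threadIdx.x < 2 * kThreads, i.e. when this thread owns part of the
// scale or bias vector.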
#if defined(_MSC_VER) || (__CUDACC_VER_MAJOR__ < 11)
enabled = threadIdx.x < kThreads * 2;
#else
asm volatile(
"{\n"
" .reg .u32 tid_reg;\n"
" .reg .pred p;\n"
" mov.u32 tid_reg, %%tid.x;\n"
" setp.lt.u32 p, tid_reg, %1;\n"
" selp.u32 %0, 1, 0, p;\n"
"}\n" : "+r"(enabled) :"n"(kThreads * 2));
#endif
return ((thread_offset_.contiguous() < problem_size_c) && enabled);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename ThreadblockShape_,
typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::RowMajor> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator<
layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
using Params = PredicatedScaleBiasVectorAccessIteratorParams;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Extent of tensor
Conv2dProblemSize const &problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params, problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Extent of tensor
Conv3dProblemSize const &problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params, problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedScaleBiasVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Conv2dProblemSize const &problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Conv3dProblemSize const &problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Advances the underlying iterator.
CUTLASS_HOST_DEVICE
void advance() {
iterator_.advance();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 16,915 | C | 34.915074 | 100 | 0.640142 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (activation tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dWgradActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
// Filter position (r, s, c) in the contiguous dimension stays constant for each gemm_iteration_k
int filter_r_[ThreadMap::Iterations::kContiguous];
int filter_s_[ThreadMap::Iterations::kContiguous];
int filter_c_[ThreadMap::Iterations::kContiguous];
int offset_npq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dWgradActivationTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr))
{
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize r,s,c filter position for every contiguous iteration
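// The contiguous (GEMM-K) coordinate flattens the filter position as r * (S * C) + s * C + c.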
CUTLASS_PRAGMA_UNROLL
for(int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int rsc_offset = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
filter_r_[c] = rsc_offset / (problem_size_.S * problem_size_.C);
int residual = rsc_offset % (problem_size_.S * problem_size_.C);
filter_s_[c] = residual / problem_size_.C;
filter_c_[c] = residual % problem_size_.C;
}
// initialize n, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] = threadblock_offset.row() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_npq_) in GEMM-B by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the activation tensor x that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int r, s, c;
if (kAccessesPerVector == 1) {
/// One 128b aligned access fetching more than one element
c = filter_c_[iteration_contiguous_];
r = filter_r_[iteration_contiguous_];
s = filter_s_[iteration_contiguous_];
}
else {
/// Multiple accesses to support non-128b alignment in the contiguous dimension
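/// Overflow past the channel extent C carries into s, and overflow past S carries into r.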
c = (filter_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements) % problem_size_.C;
int wrap_c = (filter_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements) / problem_size_.C;
s = (filter_s_[iteration_contiguous_] + wrap_c) % problem_size_.S;
int wrap_s = (filter_s_[iteration_contiguous_] + wrap_c) / problem_size_.S;
r = filter_r_[iteration_contiguous_] + wrap_s;
}
if (problem_size_.mode == Mode::kConvolution) {
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int n = offset_npq_[iteration_strided_] / (problem_size_.P * problem_size_.Q);
int residual = offset_npq_[iteration_strided_] % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, h, w, c);
}
/// Returns true if the current coordinate is within the activation tensor x
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dWgradActivationTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,350 | C | 35.192308 | 116 | 0.658164 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_
>
class Conv3dFpropFilterTileAccessIteratorOptimized{
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params : Conv3dFpropFilterIteratorOptimizedParams<Layout> {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv3dFpropFilterIteratorOptimizedParams<Layout> const &base):
Conv3dFpropFilterIteratorOptimizedParams<Layout>(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
):
Conv3dFpropFilterIteratorOptimizedParams<Layout>(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) {
}
};
private:
Conv3dFpropFilterIteratorOptimizedParams<Layout> const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
uint32_t predicates_;
int filter_trs_;
int filter_c_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorOptimized(
Conv3dFpropFilterIteratorOptimizedParams<Layout> const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_trs_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
Index column = threadblock_offset.column() + thread_coord.strided();
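// Bit s of predicates_ records whether the K coordinate of strided iteration s is in range.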
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
uint32_t pred = ((column + s * ThreadMap::Delta::kStrided < problem_size_.K) ? 1u : 0);
predicates_ |= (pred << s);
}
if (filter_c_ >= problem_size.C) {
predicates_ = 0u;
}
pointer_ += (
params_.layout({filter_c_, column})
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
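/// Advances to the next filter position (t, r, s) using precomputed pointer increments; after the last
/// position, steps to the next channel tile and clears the predicates once the channel extent is exceeded.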
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_trs;
// moves to the next tile
++filter_trs_;
if (filter_trs_ == params_.TRS) {
filter_trs_ = 0;
next = params_.inc_next_c;
filter_c_ += params_.filter_c_delta;
}
if (filter_c_ >= problem_size_.C) {
predicates_ = 0;
}
pointer_ += next;
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
return (predicates_ & (1u << iteration_strided_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_k;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,891 | C | 30.985611 | 105 | 0.658981 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorNCxHWx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropActivationTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
using Mask = uint64_t;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dFpropActivationIteratorOptimizedParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (r, s)
int filter_r_;
int filter_s_;
int filter_c_;
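// masks_[s][v][0] packs one predicate bit per filter row r (bounds check on n and h);
// masks_[s][v][1] packs one predicate bit per filter column s (bounds check on w).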
Index masks_[ThreadMap::Iterations::kStrided][kAccessesPerVector][2];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_c_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_npq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
// The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_npq / (problem_size_.P * problem_size_.Q);
// int residual = offset_npq % (problem_size_.P * problem_size_.Q);
//
// offset_p[s] = residual / problem_size_.Q;
// offset_q[s] = residual % problem_size_.Q;
//
int residual;
params.pq_divmod(offset_n[s], residual, offset_npq);
params.q_divmod(offset_p[s], offset_q[s], residual);
TensorCoord coord = at_(offset_n[s], offset_p[s], offset_q[s], 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int h = offset_p[s_idx] * problem_size_.stride_h - problem_size_.pad_h + r_ * problem_size_.dilation_h;
bool pred = (offset_n[s_idx] < problem_size_.N && h >= 0 && h < problem_size_.H);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][0] |= (pred << r);
}
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int w = offset_q[s_idx] * problem_size_.stride_w - problem_size_.pad_w + s_ * problem_size_.dilation_w;
bool pred = (w >= 0 && w < problem_size_.W);
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
masks_[s_idx][v_idx][1] |= (pred << s);
}
}
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= problem_size_.C);
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
/// Returns the coordinate in the activations tensor X that corresponds to
/// output npq and filter position (r, s)
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int p, int q, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, h, w, filter_c_);
}
/// Adds a byte offset to each access pointer
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
next_idx = 1;
}
else {
filter_r_ = 0;
next_idx = 2;
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 2) {
filter_c_ += params_.filter_c_delta;
}
CUTLASS_PRAGMA_UNROLL
for (int v_idx = 0; v_idx < kAccessesPerVector; ++v_idx) {
clear_mask(v_idx, filter_c_ + v_idx * AccessType::kElements >= problem_size_.C);
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask(bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
masks_[s][v][0] = clear ? 0 : masks_[s][v][0];
masks_[s][v][1] = clear ? 0 : masks_[s][v][1];
}
}
}
/// Clears the predicates for the given vector access
CUTLASS_HOST_DEVICE
void clear_mask(int v, bool clear = true) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][v][0] = clear ? 0 : masks_[s][v][0];
masks_[s][v][1] = clear ? 0 : masks_[s][v][1];
}
}
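/// A load is valid only if bit filter_r_ of masks_[..][..][0] (one bit per filter row,
/// set in the constructor's R loop) and bit filter_s_ of masks_[..][..][1] (one bit per
/// filter column) are both set; this bit budget is why can_implement() rejects
/// problems with R or S above 32.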
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][iteration_vector_][0] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][iteration_vector_][1] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorNCxHWx<32>>::value) {
if (problem_size.C % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorNCxHWx<64>>::value) {
if (problem_size.C % 64) {
return Status::kErrorInvalidProblem;
}
}
// Conv2dFpropActivationTileAccessIteratorOptimized has constraint on filter positions
// due to the number of mask bits.
if (problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,664 | C | 31.304964 | 114 | 0.626683 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/depthwise_direct_conv_params.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Shape_,
typename OutputTileShape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> >
class DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using OutputTileShape = OutputTileShape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1");
static_assert(OutputTileShape::kN == 1, "Require OutputTileShape::kN == 1");
static_assert(OutputTileShape::kC == Shape::kColumn, "Require OutputTile shape == channels per threadblock");
//
// Parameters structure
//
using Params = Depthwise2dFpropDirectConvParams<Layout>;
private:
Conv2dProblemSize const &problem_size_;
Params const ¶ms_;
char const *pointer_;
// Base channels for current threadblock
int base_c_;
// Base activation index for current threadblock
int offset_intial_npq_;
// Base activation coord for current threadblock
TensorCoord activatioin_base_;
// Initial thread position
int offset_initial_hwc_;
// Index of the current load iteration for this thread.
int iterator_load_;
// Current thread loading position.
int iterator_hwc_;
// Number of loads for activations tensor X.
const int number_of_loads_;
public:
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset =
MatrixCoord()
)
: params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
offset_intial_npq_(threadblock_offset.row()),
offset_initial_hwc_(thread_idx),
iterator_load_(0),
number_of_loads_(params.activation_load_count) {
base_c_ = threadblock_offset.column();
set_activation_coord(offset_intial_npq_);
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
void set_activation_coord(int offset_npq) {
int offset_inital_n, offset_inital_p, offset_inital_q;
int residual;
params_.pq_divmod(offset_inital_n, residual, offset_npq);
params_.q_divmod(offset_inital_p, offset_inital_q, residual);
int base_n = offset_inital_n;
int base_h =
offset_inital_p * OutputTileShape::kH * problem_size_.stride_h - problem_size_.pad_h;
int base_w =
offset_inital_q * OutputTileShape::kW * problem_size_.stride_w - problem_size_.pad_w;
activatioin_base_ = TensorCoord(base_n, base_h, base_w, base_c_);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(
problem_size,
layout,
{Shape::kRow, Shape::kColumn},
{OutputTileShape::kN, OutputTileShape::kH, OutputTileShape::kW, OutputTileShape::kC},
sizeof_bits<Element>::value,
ThreadMap::kThreads,
ThreadMap::Detail::ShapeVec::kContiguous,
ThreadMap::kElementsPerAccess);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iterator_hwc_ = offset_initial_hwc_ + index * ThreadMap::kThreads;
iterator_load_ = index;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// Go to next threadblock
offset_intial_npq_ += problem_size_.split_k_slices;
set_activation_coord(offset_intial_npq_);
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
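/// For illustration (hypothetical thread map): with ShapeVec::kContiguous = 8 and
/// AccessType::kElements = 4, iterator_hwc_ = 21 selects channel offset
/// c = (21 % 8) * 4 = 20 and linear tile position next = 21 / 8 = 2, which
/// activation_tile_w_divmod then splits into (h, w) within the activation tile.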
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int c = iterator_hwc_ % ThreadMap::Detail::ShapeVec::kContiguous;
int next = iterator_hwc_ / ThreadMap::Detail::ShapeVec::kContiguous;
int h, w;
params_.activation_tile_w_divmod(h, w, next);
c = c * AccessType::kElements;
return activatioin_base_ + TensorCoord(0, h, w, c);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N && coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr =
reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized &operator++() {
++iterator_load_;
iterator_hwc_ += ThreadMap::kThreads;
if (iterator_load_ < number_of_loads_) {
return *this;
}
iterator_load_ = 0;
iterator_hwc_ = offset_initial_hwc_;
return *this;
}
/// Determines the activation size loaded by iterator
CUTLASS_HOST_DEVICE
int get_load_size() {
return params_.activation_size;
}
/// Determines the iterations needed
CUTLASS_HOST_DEVICE
int get_iteration_num() {
return number_of_loads_;
}
/// Determines whether the Depthwise fprop can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,899 | C | 32.904109 | 111 | 0.664411 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped fused activation's
scale+bias+relu and Implicit GEMM Convolution kernel.
The original implicit gemm stores out-of-bound data as zeroes in shared
memory: zeroes go into the tensor cores and zeroes come out, so the result
is unchanged. When scale+bias+relu is fused into the mainloop this no
longer holds, because
  0 x scale + bias = bias
which is not always 0. So, instead of storing zeroes, this fused kernel
stores the out-of-bound data as a special NaN (0x7eff). When applying
scale+bias+relu, the code looks like
if (data == 0x7eff)
data = 0;
else
data = scale+bias+relu(data, scale, bias);
See include/cutlass/conv/warp/scale_bias_relu_transformation.h for the
elementwise computation. See include/cutlass/arch/memory_sm80.h for nan fill.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/scale_bias_tile_iterator.h"
#include "cutlass/conv/warp/scale_bias_relu_transform.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Element type of scale and bias vectors
typename ElementScaleBias_,
/// Layout of scale and bias vectors
typename LayoutScaleBias_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// WarpIterator to load Scale or Bias vector from the shared memory
typename WarpIteratorScaleBias_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class MmaFpropFusionBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Element type of scale and bias vectors
using ElementScaleBias = ElementScaleBias_;
/// Layout of scale and bias vectors
using LayoutScaleBias = LayoutScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
///< WarpIterator to load Scale or Bias vector from the shared memory
using WarpIteratorScaleBias = WarpIteratorScaleBias_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
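// For example (hypothetical shapes, not mandated here): a warp tile with kK = 32
// and an MMA instruction shape with kK = 8 give kWarpGemmIterations = 4, which
// satisfies the two static_asserts below.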
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the scale and bias vectors
using TensorRefScaleBias = TensorRef<ElementScaleBias, LayoutScaleBias>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the A scale and bias vectors in shared memory
using ShapeScaleBias =
MatrixShape<1 + Policy::SmemPaddingA::kRow,
2 * Shape::kK * kStages + Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
/// Buffer for A operand Scale and Bias
AlignedBuffer<ElementScaleBias, ShapeScaleBias::kCount> operand_A_scale_bias;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a layout object for the A scale and bias vectors
CUTLASS_DEVICE
static LayoutScaleBias LayoutScaleBias() {
return LayoutScaleBias::packed(
{ShapeScaleBias::kRow, ShapeScaleBias::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
/// Returns a TensorRef to the A operand Scale vector
CUTLASS_HOST_DEVICE
TensorRefScaleBias operand_A_scale_bias_ref() {
return TensorRefScaleBias{operand_A_scale_bias.data(), LayoutScaleBias()};
}
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of A operand scale and bias vector
/// from shared memory
WarpIteratorScaleBias warp_tile_iterator_A_scale_bias_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaFpropFusionBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_A_scale_bias_(
shared_storage.operand_A_scale_bias_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorScaleBias_,
/// Iterates over vectors of scale and bias vector in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorScaleBias_,
/// Cache operation for scale/bias operand
cutlass::arch::CacheOperation::Kind CacheOpScaleBias,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// WarpIterator to load Scale or Bias vector from the shared memory
typename WarpIteratorScaleBias_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class ImplicitGemmFpropFusionMultistage
: public MmaFpropFusionBase<Shape_, typename IteratorScaleBias_::Element,
typename IteratorScaleBias_::Layout, Policy_,
WarpIteratorScaleBias_, Stages> {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Iterates over tiles of the scale and bias vectors in global memory
using IteratorScaleBias = IteratorScaleBias_;
///< WarpIterator to load Scale or Bias vector from the shared memory
using WarpIteratorScaleBias = WarpIteratorScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
///< Base class
using Base = MmaFpropFusionBase<Shape_, typename IteratorScaleBias::Element,
typename IteratorScaleBias::Layout, Policy,
WarpIteratorScaleBias, Stages>;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using SmemIteratorScaleBias = SmemIteratorScaleBias_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static cutlass::arch::CacheOperation::Kind const kCacheOpScaleBias =
CacheOpScaleBias;
//
// Dependent types
//
/// Fragment of accumulator tile
using ElementC = typename Policy::Operator::ElementC;
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
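// Both of the above are ceiling divisions; e.g. (hypothetically) 6 copy iterations
// spread over 4 warp-level MMA iterations give (6 + 4 - 1) / 4 = 2 cp.async issues
// per group.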
};
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpLoadedFragmentScaleBias =
typename WarpIteratorScaleBias::Fragment;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of A operand scale vector to shared memory
SmemIteratorScaleBias smem_iterator_A_scale_bias_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
ImplicitGemmFpropFusionMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_A_scale_bias_(shared_storage.operand_A_scale_bias_ref(),
thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
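// Illustrative only: with a hypothetical 2x2 MxN warp arrangement (WarpCount::kM =
// WarpCount::kN = 2), warp_idx = 5 yields warp_idx_mn = 1, warp_idx_k = 1,
// warp_idx_m = 1 and warp_idx_n = 0.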
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_A_scale_bias_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A,
IteratorScaleBias &iterator_A_scale_bias,
IteratorB &iterator_B, int group_start_A = 0,
int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / 8;
// Uses NaN fill for out-of-bound data
cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpA>(
dst_ptr, iterator_A.get(), iterator_A.valid());
++iterator_A;
++this->smem_iterator_A_;
}
}
// Async Copy for operand A scale and bias vector. Scale and bias vectors
// are small. One iteration is enough.
if (group_start_A == 0) {
typename IteratorScaleBias::AccessType *dst_ptr =
reinterpret_cast<typename IteratorScaleBias::AccessType *>(
this->smem_iterator_A_scale_bias_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorScaleBias::Element>::value *
IteratorScaleBias::kElementsPerAccess / 8;
cutlass::arch::cp_async<kSrcBytes, kCacheOpScaleBias>(
dst_ptr, iterator_A_scale_bias.get(), iterator_A_scale_bias.valid());
}
iterator_B.set_iteration_index(group_start_B);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr, iterator_B.get(), iterator_B.valid());
++iterator_B;
++this->smem_iterator_B_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< iterator over scale and bias vectors in global memory
IteratorScaleBias iterator_A_scale_bias,
///< initial value of accumulator
FragmentC const &src_accum,
///< number of iterations per channel
int gemm_k_iterations_per_channel = 0,
///< Imaginary strides used for planar-complex only - ignored here
int64_t imag_stride_A = 0,
int64_t imag_stride_B = 0) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / 8;
// Uses NaN fill for out-of-bound data
cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpA>(
dst_ptr, iterator_A.get(), iterator_A.valid());
++iterator_A;
++this->smem_iterator_A_;
}
// Async Copy for operand A scale and bias vectors. Scale and bias
// vectors are small. One iteration is enough.
{
typename IteratorScaleBias::AccessType *dst_ptr =
reinterpret_cast<typename IteratorScaleBias::AccessType *>(
this->smem_iterator_A_scale_bias_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorScaleBias::Element>::value *
IteratorScaleBias::kElementsPerAccess / 8;
cutlass::arch::cp_async<kSrcBytes, kCacheOpScaleBias>(
dst_ptr, iterator_A_scale_bias.get(), iterator_A_scale_bias.valid());
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr, iterator_B.get(), iterator_B.valid());
++iterator_B;
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.advance();
iterator_A_scale_bias.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_A_scale_bias_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpLoadedFragmentScaleBias warp_loaded_frag_A_scale_bias[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
cutlass::conv::warp::FpropScaleBiasReluTransform<WarpTransformedFragmentA,
WarpLoadedFragmentScaleBias>
elementwise_transform;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_A_scale_bias_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_A_scale_bias_.load(
warp_loaded_frag_A_scale_bias[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_scale_bias_;
++this->warp_tile_iterator_B_;
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A, iterator_A_scale_bias, iterator_B);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
elementwise_transform(warp_transformed_frag_A[0],
warp_loaded_frag_A_scale_bias[0]);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_scale_bias_.set_kgroup_index(
(warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_scale_bias_.load(
warp_loaded_frag_A_scale_bias[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_scale_bias_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0) {
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_loaded_frag_A_scale_bias[warp_mma_k % 2]);
}
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
} else {
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(iterator_A, iterator_A_scale_bias, iterator_B,
group_start_iteration_A,
group_start_iteration_B);
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
elementwise_transform(
warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_A_scale_bias[(warp_mma_k + 1) % 2]);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages of cp.async have committed
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.advance();
iterator_A_scale_bias.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_A_scale_bias_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_A_scale_bias_.add_tile_offset(
{0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_A_scale_bias_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
}
}
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 30,106 | C | 36.493151 | 100 | 0.632698 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_direct_conv_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Extracts the host-params objects into non-template code.
*/
#pragma once
#define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
#include <fstream>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvParams;
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams;
/// Parameters structure used for DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvFilterIteratorParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized
template<>
struct Depthwise2dFpropDirectConvParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
int32_t activation_tile_h;
int32_t activation_tile_w;
int32_t activation_tile_hw;
FastDivmod activation_tile_w_divmod;
int filter[2];
int stride[2];
int dilation[2];
int inc_next[2];
FastDivmod pq_divmod;
FastDivmod q_divmod;
int activation_load_count;
int activation_storage_elements;
int activation_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvParams() { }
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
MatrixCoord threadblock_shape, ///< CTA threadblock Shape
Layout::TensorCoord threadblock_output_shape, ///< Output tile Shape per threadblock
const int element_size_bits, ///< bits of activation element
const int thread_count, ///< threads per threadblock
const int thread_count_contiguous, ///< number of threads for continuous dimension
const int element_per_load) ///< element per each load
: layout(layout) {
filter[0] = problem_size.S;
filter[1] = problem_size.R;
stride[0] = problem_size.stride_w;
stride[1] = problem_size.stride_h;
dilation[0] = problem_size.dilation_w;
dilation[1] = problem_size.dilation_h;
// Compute activation_tile size per threadblock because stride and dilation are runtime params.
activation_tile_h = (threadblock_output_shape.h() - 1) * problem_size.stride_h +
(problem_size.R - 1) * problem_size.dilation_h + 1;
activation_tile_w = (threadblock_output_shape.w() - 1) * problem_size.stride_w +
(problem_size.S - 1) * problem_size.dilation_w + 1;
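// For instance (hypothetical problem, for illustration): an 8x8 output tile per
// threadblock with unit stride, a 3x3 filter and unit dilation needs an activation
// tile of (8 - 1) * 1 + (3 - 1) * 1 + 1 = 10 in each spatial dimension.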
activation_tile_hw = activation_tile_h * activation_tile_w;
activation_tile_w_divmod = FastDivmod(activation_tile_w);
/// The two values below cannot be templatized because stride and dilation are runtime params
activation_load_count = (thread_count_contiguous * activation_tile_hw + (thread_count - 1)) / thread_count;
activation_storage_elements = activation_load_count * element_per_load * thread_count;
activation_size = activation_storage_elements * element_size_bits / 8;
// Fastdivmod for output P, Q
int tiles_p =
(problem_size.P + (threadblock_output_shape.h() - 1)) / (threadblock_output_shape.h());
int tiles_q = (problem_size.Q + (threadblock_output_shape.w() - 1)) /
(threadblock_output_shape.w());
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
// next S
inc_next[0] = problem_size.dilation_w;
// next R
inc_next[1] = (activation_tile_w * problem_size.dilation_h - (problem_size.S - 1) * problem_size.dilation_w);
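// With the same hypothetical 10-wide tile, 3x3 filter and unit dilation, moving to
// the next filter row advances by 10 * 1 - (3 - 1) * 1 = 8 positions, i.e. one
// activation-tile row minus the S positions already traversed.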
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation
template <>
struct Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
FastDivmod pq_divmod;
FastDivmod q_divmod;
int activation_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams() {}
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< Layout object
MatrixCoord threadblock_shape, ///< Threadblock Shape
Layout::TensorCoord threadblock_output_shape, ///< Output tile Shape per threadblock
const int activation_size_ ///< Activation size loaded by iterator
)
: layout(layout),
activation_size(activation_size_) {
// Fastdivmod for output P, Q
int tiles_p =
(problem_size.P + (threadblock_output_shape.h() - 1)) / (threadblock_output_shape.h());
int tiles_q =
(problem_size.Q + (threadblock_output_shape.w() - 1)) / (threadblock_output_shape.w());
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized
template <>
struct Depthwise2dFpropDirectConvFilterIteratorParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
int filter_size;
bool is_convolution;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvFilterIteratorParams() {}
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvFilterIteratorParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< Layout object
MatrixCoord threadblock_shape, ///< Threadblock Shape
const int filter_size_) ///< Filter size loaded by iterator
: layout(layout),
filter_size(filter_size_),
is_convolution(problem_size.mode == Mode::kConvolution){}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,871 | C | 37.406926 | 113 | 0.655845 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradOutputGradientTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params : Conv3dWgradOutputGradientIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() {}
CUTLASS_HOST_DEVICE
Params(Conv3dWgradOutputGradientIteratorOptimizedParams const &base)
: Conv3dWgradOutputGradientIteratorOptimizedParams(base) {}
CUTLASS_HOST_DEVICE
Params(Conv3dProblemSize const &problem_size, Layout const &layout)
: Conv3dWgradOutputGradientIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}) {}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
uint32_t predicates_;
int filter_k_;
int offset_nzpq_;
public:
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorOptimized(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_(0),
filter_k_(0),
offset_nzpq_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.contiguous();
offset_nzpq_ = threadblock_offset.column() + thread_coord.strided();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + c * ThreadMap::Delta::kContiguous;
int offset_nzpq = offset_nzpq_ + s * ThreadMap::Delta::kStrided;
bool predicate = valid_(at_(offset_nzpq, filter_k));
uint32_t pred = (predicate ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_ |= (pred << pred_idx);
}
}
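// Each (contiguous, strided) access owns one bit of predicates_; e.g. (hypothetical
// thread map) with ThreadMap::Iterations::kContiguous = 4, the access at c = 2,
// s = 3 is guarded by bit 2 + 3 * 4 = 14.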
// Offset pointer to (iteration_strided_, iteration_contiguous_) = (0, 0)
pointer_ += (
offset_nzpq_ * params.layout.stride()[0] + filter_k_
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_nzpq_) in GEMM-A by a CTA-K tile
offset_nzpq_ += Shape::kColumn * problem_size_.split_k_slices;
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (offset_nzpq_ + s * ThreadMap::Delta::kStrided >= params_.NZPQ) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
predicates_ = (predicates_ & (~kClearMask));
}
}
pointer_ += params_.inc_next_nzpq;
}
private:
/// Returns the coordinate in the output gradient tensor Dy that is (offset_nzpq, k) pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at_(int offset_nzpq, int k) const {
// The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// int nzpq = offset_nzpq_;
// int n = nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
// int residual = nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
//
// int z = residual / (problem_size_.P * problem_size_.Q);
// residual = residual % (problem_size_.P * problem_size_.Q);
//
// int p = residual / problem_size_.Q;
// int q = residual % problem_size_.Q;
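// Hypothetical example (sizes chosen only for illustration): with Z = 4 and
// P = Q = 8, offset_nzpq = 300 decomposes into n = 300 / 256 = 1 with residual 44,
// then z = 44 / 64 = 0, p = 44 / 8 = 5 and q = 44 % 8 = 4.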
int residual, n, z, p, q;
fast_divmod(n, residual, offset_nzpq, params_.ZPQ, params_.zpq_mul, params_.zpq_shr);
fast_divmod(z, residual, residual, params_.PQ, params_.pq_mul, params_.pq_shr);
fast_divmod(p, q, residual, problem_size_.Q, params_.q_mul, params_.q_shr);
return TensorCoord(n, z, p, q, k);
}
/// Returns true if the coord is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid_(TensorCoord coord) const {
return coord.n() < problem_size_.N &&
coord.c() < problem_size_.K;
}
public:
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_ & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(
pointer_ +
iteration_strided_ * params_.offset_next_strided +
iteration_contiguous_ * params_.offset_next_contiguous
);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,744 | C | 33.549839 | 124 | 0.655622 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template wraps the tile access iterator concept to load whole tiles from tensors in
memory used for implicit GEMM convolution.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename TileAccessIterator_>
class TileIterator {
public:
using TileAccessIterator = TileAccessIterator_;
using Shape = typename TileAccessIterator::Shape;
using Element = typename TileAccessIterator::Element;
using Layout = typename TileAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = typename TileAccessIterator::ThreadMap;
using AccessType = typename TileAccessIterator::AccessType;
using TensorRef = typename TileAccessIterator::TensorRef;
using Index = typename TileAccessIterator::Index;
using LongIndex = typename TileAccessIterator::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport;
using Params = typename TileAccessIterator::Params;
static int const kConvDim = TileAccessIterator::kConvDim;
using ConvProblemSize = typename TileAccessIterator::ConvProblemSize;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element,
ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
/// Internal state
TileAccessIterator tile_access_iterator_;
public:
/// Constructor
CUTLASS_HOST_DEVICE
TileIterator(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(params, problem_size, ptr, thread_idx, threadblock_offset) { }
CUTLASS_HOST_DEVICE
static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) {
return TileAccessIterator::getParams(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
tile_access_iterator_.set_iteration_index(index);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
tile_access_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIterator &operator++() {
tile_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIterator operator++(int) {
TileIterator self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[idx],
tile_access_iterator_.get() + pointer_offset,
tile_access_iterator_.valid()
);
++tile_access_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
tile_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
tile_access_iterator_.advance();
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// dispatch to iterator implementation
return TileAccessIterator::can_implement(problem_size);
}
};
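// Illustrative usage sketch (added comment; names such as TileIter, tb_offset and
// gemm_k_iterations are placeholders, not taken from this header). A threadblock mainloop
// typically constructs the wrapper once and loads one whole tile per GEMM-K step:
//
//   typename TileIter::Fragment frag;
//   TileIter iter(params, problem_size, ptr, threadIdx.x, tb_offset);
//   for (int k = 0; k < gemm_k_iterations; ++k) {
//     iter.load(frag);   // predicated global loads into the register fragment
//     ++iter;            // advance to the next tile along GEMM-K
//     // ... store frag to shared memory / feed the warp-level MMA ...
//   }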
/////////////////////////////////////////////////////////////////////////////////////////////////
// Strided Dgrad Tile Iterator
template <typename TileAccessIterator_>
class TileIteratorStridedDgrad {
public:
using TileAccessIterator = TileAccessIterator_;
using Shape = typename TileAccessIterator::Shape;
using Element = typename TileAccessIterator::Element;
using Layout = typename TileAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = typename TileAccessIterator::ThreadMap;
using AccessType = typename TileAccessIterator::AccessType;
using TensorRef = typename TileAccessIterator::TensorRef;
using Index = typename TileAccessIterator::Index;
using LongIndex = typename TileAccessIterator::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = TileAccessIterator::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = TileAccessIterator::kStrideSupport;
using Params = typename TileAccessIterator::Params;
static int const kConvDim = TileAccessIterator::kConvDim;
using ConvProblemSize = typename TileAccessIterator::ConvProblemSize;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element,
ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
/// Internal state
TileAccessIterator tile_access_iterator_;
public:
/// Constructor (output gradient (Dy) OperandA ctor)
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(
params,
problem_size,
ptr,
thread_idx,
stride_h_divmod, stride_w_divmod,
start_r, start_s,
threadblock_offset) { }
/// Constructor (filter (w) OperandB ctor)
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
tile_access_iterator_(params,
problem_size,
ptr,
thread_idx,
start_r, start_s,
threadblock_offset) { }
CUTLASS_HOST_DEVICE
static Params getParams(ConvProblemSize const &problem_size, Layout const &layout) {
return TileAccessIterator::getParams(problem_size, layout);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
tile_access_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad &operator++() {
tile_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
TileIteratorStridedDgrad operator++(int) {
TileIteratorStridedDgrad self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[c + s * ThreadMap::Iterations::kContiguous],
tile_access_iterator_.get() + pointer_offset,
tile_access_iterator_.valid()
);
++tile_access_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
tile_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
tile_access_iterator_.advance();
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// dispatch to iterator implementation
return TileAccessIterator::can_implement(problem_size);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,202 | C | 32.14497 | 100 | 0.672916 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dWgradOutputGradientTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
using Params = Conv2dWgradOutputGradientIteratorOptimizedParams;
private:
Conv2dWgradOutputGradientIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_k_;
int offset_npq_;
public:
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientTileAccessIteratorOptimized(
Conv2dWgradOutputGradientIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_k_(0),
offset_npq_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.contiguous();
offset_npq_ = threadblock_offset.column() + thread_coord.strided();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + c * ThreadMap::Delta::kContiguous;
int offset_npq = offset_npq_ + s * ThreadMap::Delta::kStrided;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
bool predicate = valid_(at_(offset_npq, filter_k + v * AccessType::kElements));
uint32_t pred = (predicate ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_[v] |= (pred << pred_idx);
}
}
}
// Offset pointer to (iteration_strided_, iteration_contiguous_) = (0, 0)
pointer_ += (
offset_npq_ * params.layout.stride()[0] + filter_k_
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
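  // Descriptive note (added comment): each of the kAccessesPerVector predicate words above
  // packs one bit per (contiguous, strided) access at bit index
  //   pred_idx = c + s * ThreadMap::Iterations::kContiguous,
  // which is the same index reconstructed in valid(). advance() later clears the whole
  // group of bits belonging to a strided iteration once offset_npq_ runs past params_.NPQ.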
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_npq_) in GEMM-A by a CTA-K tile
offset_npq_ += Shape::kColumn * problem_size_.split_k_slices;
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (offset_npq_ + s * ThreadMap::Delta::kStrided >= params_.NPQ) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
predicates_[v] = (predicates_[v] & (~kClearMask));
}
}
}
pointer_ += params_.inc_next_npq;
}
private:
/// Returns the coordinate in the output gradient tensor Dy that is pointed to
/// by offset_npq and k.
CUTLASS_HOST_DEVICE
TensorCoord at_(int offset_npq, int k) const {
// The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// int npq = offset_npq;
// int n = npq / (problem_size_.P * problem_size_.Q);
// int residual = npq % (problem_size_.P * problem_size_.Q);
//
// int p = residual / problem_size_.Q;
// int q = residual % problem_size_.Q;
int residual, n, p, q;
params_.pq_divmod(n, residual, offset_npq);
params_.q_divmod(p, q, residual);
return TensorCoord(n, p, q, k);
}
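  // Illustrative note (added comment, hypothetical sizes): with P = 4 and Q = 8, PQ = 32,
  // so offset_npq = 45 maps to
  //   n = 45 / 32 = 1, residual = 13, p = 13 / 8 = 1, q = 13 % 8 = 5,
  // i.e. at_(45, k) == TensorCoord(1, 1, 5, k). pq_divmod and q_divmod return exactly these
  // quotient/remainder pairs using the precomputed fast-division parameters.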
/// Returns true if the coord is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid_(TensorCoord coord) const {
return coord.n() < problem_size_.N &&
coord.c() < problem_size_.K;
}
public:
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_[iteration_vector_] & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(
pointer_ +
iteration_strided_ * params_.offset_next_strided +
iteration_contiguous_ * params_.offset_next_contiguous
) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,832 | C | 33.832797 | 124 | 0.654727 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/conv/threadblock/depthwise_mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Epilogue stores the data into global memory
typename Epilogue_,
/// iterator implementation variants
conv::IteratorAlgorithm IteratorAlgorithm_ = conv::IteratorAlgorithm::kOptimized,
/// Used for partial specialization
typename Enable = bool>
class DepthwiseFpropDirectConvMultipleStage :
public DepthwiseDirectConvMmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = DepthwiseDirectConvMmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Policy describing tuning details
using Policy = Policy_;
using Epilogue = Epilogue_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
static conv::IteratorAlgorithm const kItertorAlgorithm = IteratorAlgorithm_;
//
// Dependent types
//
/// Fragment of accumulator tile
using ElementC = typename Policy::Operator::ElementC;
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
    /// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
};
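  // Illustrative note (added comment, hypothetical values): kAccessesPerGroupB is a ceiling
  // division. For example, AsyncCopyIterationsPerStageB = 6 with Base::kWarpGemmIterations = 4
  // gives kAccessesPerGroupB = (6 + 4 - 1) / 4 = 2, i.e. the per-stage copy of operand B is
  // split into groups of two copy iterations per warp-level MMA step.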
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DepthwiseFpropDirectConvMultipleStage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
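  // Illustrative note (added comment, hypothetical warp count): with Base::WarpCount::kM = 2,
  // Base::WarpCount::kN = 2 and warp_idx = 3, the mapping above yields
  //   warp_idx_mn = 3 % 4 = 3, warp_idx_k = 3 / 4 = 0,
  //   warp_idx_m  = 3 % 2 = 1, warp_idx_n  = 3 / 2 = 1,
  // so warp 3 owns the (m, n) = (1, 1) warp-level tile of the threadblock tile.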
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A,
IteratorB &iterator_B,
int group_start_A = 0,
int group_start_B = 0) {
if (kItertorAlgorithm == conv::IteratorAlgorithm::kFixedStrideDilation) {
      // Number of iterations is a compile-time constant.
iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
} else {
      // Number of iterations is a runtime value.
iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < iterator_A.get_iteration_num(); ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA &iterator_A,
///< Params of global memory iterator
typename IteratorA::Params const &iterator_a_params,
///< iterator over B operand in global memory
IteratorB &iterator_B,
///< Params of global memory iterator
typename IteratorB::Params const &iterator_b_params,
///< initial value of accumulator
FragmentC const &src_accum,
/// Epilogue
Epilogue &epilogue,
///< Output operator
typename Epilogue::OutputOp const &output_op,
///< Tile iterator for destination
typename Epilogue::OutputTileIterator &destination_iterator,
///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
typename Epilogue::OutputTileIterator &source_iterator,
int split_k_slices = 1
) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) {
if (stage == 0) {
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
}
      if (kItertorAlgorithm == conv::IteratorAlgorithm::kFixedStrideDilation) {
        // Number of iterations is a compile-time constant.
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
} else {
        // Number of iterations is a runtime value.
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_num(iterator_A.get_iteration_num());
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < iterator_A.get_iteration_num(); ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
}
// Move to the next stage
iterator_A.advance();
this->smem_iterator_A_.add_tile_offset({1, 0});
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
}
/////////////////////////////////////////////////////////////////////////////
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
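    // Descriptive note (added comment): the prologue above has committed Base::kStages - 1
    // asynchronous stage copies; cp_async_wait<Base::kStages - 2> blocks until at most
    // kStages - 2 of them are still outstanding, so the first stage is guaranteed to be
    // resident in shared memory before the warp-level fragments below are loaded.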
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.setup_initial_status(iterator_a_params);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
//
// Mainloop
//
unsigned int iterations = 0;
constexpr int inner_loop_iterations = round_up(Base::kWarpGemmIterations, 2);
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) { // Each iteration is a cta tile.
accum.clear();
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < inner_loop_iterations; ++warp_mma_k) {
if (Base::kWarpGemmIterations % 2 == 0 || warp_mma_k + 1 != Base::kWarpGemmIterations) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Shape::kK);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Shape::kK);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
}
if (warp_mma_k > 0)
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k == 0) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
copy_tiles_and_advance(
iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B);
}
if (warp_mma_k < Base::kWarpGemmIterations) {
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
}
if (warp_mma_k + 1 == inner_loop_iterations)
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
if (warp_mma_k + 2 == inner_loop_iterations) {
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages of cp.async have committed
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next cta
iterator_A.advance();
this->smem_iterator_A_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.advance(- (Base::kStages-1) * iterator_A.get_load_size());
smem_read_stage_idx = 0;
} else {
this->warp_tile_iterator_A_.advance(iterator_A.get_load_size());
++smem_read_stage_idx;
}
if (kItertorAlgorithm == conv::IteratorAlgorithm::kFixedStrideDilation) {
this->warp_tile_iterator_A_.setup_initial_status(iterator_a_params);
}
          // Go back to the start position; B has no multiple stages
this->warp_tile_iterator_B_.add_tile_offset({-Policy::kPartitionsK * Shape::kK, 0});
--gemm_k_iterations;
}
}
//
// Epilogue
//
int32_t smem_base_offset = iterator_B.get_load_size() + (iterations % Base::kStages) * iterator_A.get_load_size();
destination_iterator.set_tile_index(iterations * split_k_slices);
source_iterator.set_tile_index(iterations * split_k_slices);
epilogue(output_op, destination_iterator, accum, source_iterator, smem_base_offset);
++iterations;
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 20,899 | C | 36.862319 | 120 | 0.611943 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Extracts the host-params objects into non-template code.
*/
#pragma once
#define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
#include "cutlass/conv/conv3d_problem_size.h"
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
#include <fstream>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Params structure used for all Conv3d analytic tile iterators
template< typename Layout_ = layout::TensorNDHWC >
struct Conv3dAnalyticParams {
using Layout = Layout_;
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dAnalyticParams() { }
CUTLASS_HOST_DEVICE
Conv3dAnalyticParams(
Conv3dProblemSize const &, // unused; placeholder to match other Params interfaces.
Layout const &layout
): layout(layout) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv3dFpropActivationTileIteratorOptimized
template< typename Layout_ = layout::TensorNDHWC >
struct Conv3dFpropActivationIteratorOptimizedParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv3dFpropActivationTileIteratorOptimized
template<>
struct Conv3dFpropActivationIteratorOptimizedParams<layout::TensorNDHWC> {
using Layout = layout::TensorNDHWC;
Layout layout;
int64_t inc_next[4]; // {next S, next R, next T, next C}
int filter_c_delta; // number of logical elements to add to filter_c_
int ZPQ; // product of Z*P*Q
int PQ; // product of P*Q
FastDivmod zpq_divmod;
FastDivmod pq_divmod;
FastDivmod q_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dFpropActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dFpropActivationIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
PQ(problem_size.P * problem_size.Q),
ZPQ(problem_size.Z * problem_size.P * problem_size.Q),
zpq_divmod(ZPQ),
pq_divmod(PQ),
q_divmod(problem_size.Q) {
TRACE_CONV_INITIALIZERS("conv3d_fprop", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1);
// next S
inc_next[0] = conv_sign * (
int64_t(layout.stride()[0]) * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[1]) * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next T
inc_next[2] = conv_sign * (
int64_t(layout.stride()[2]) * problem_size.dilation_d
- (problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next C
inc_next[3] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * int64_t(problem_size.T - 1) * layout.stride()[2] * problem_size.dilation_d
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
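// Descriptive note (added comment, not part of the original header): inc_next[] is
// delta-encoded. inc_next[0] steps one filter position along S; inc_next[1] steps along R
// while rewinding the (S - 1) steps already taken along S; inc_next[2] steps along T while
// rewinding both R and S; inc_next[3] advances the channel block by
// threadblock_shape.column() * split_k_slices elements while rewinding T, R and S.
// All four values are byte offsets, hence the trailing * element_size_bits / 8.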
/////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Layout_ = layout::TensorNDHWC >
struct Conv3dFpropFilterIteratorOptimizedParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
template<>
struct Conv3dFpropFilterIteratorOptimizedParams<layout::TensorNDHWC>
{
using Layout = layout::TensorNDHWC;
Layout layout;
int TRS;
int filter_c_delta;
int64_t inc_next_k; // offset in units of bytes to next K position
int64_t inc_next_trs; // offset in units of bytes to next TRS position
int64_t inc_next_c; // offset in units of bytes to next C position
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dFpropFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dFpropFilterIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout) {
TRACE_CONV_INITIALIZERS("conv3d_fprop", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
TRS = problem_size.T * problem_size.R * problem_size.S;
inc_next_k = (int64_t(layout.stride()[3]) * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_trs =
( int64_t(layout.stride()[0])
- int64_t(layout.stride()[3]) * (threadmap_iterations.strided() - 1) * threadmap_delta.strided()
) * element_size_bits / 8;
inc_next_c =
(
threadblock_shape.row() * problem_size.split_k_slices
- int64_t(TRS - 1) * layout.stride()[0]
- int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[3]
) * element_size_bits / 8;
filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv3d DGRAD OutputGradient (dy) iterator
struct Conv3dDgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNDHWC;
Layout layout;
int64_t inc_next[4]; // {next S, next R, next T, next K}
int filter_k_delta; // number of logical elements to add to filter_k_
FastDivmod dhw_divmod;
FastDivmod hw_divmod;
FastDivmod w_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
dhw_divmod(problem_size.D * problem_size.H * problem_size.W),
hw_divmod(problem_size.H * problem_size.W),
w_divmod(problem_size.W) {
TRACE_CONV_INITIALIZERS("conv3d_dgrad", "output_gradient",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? 1 : -1);
// next S
inc_next[0] = conv_sign * (
int64_t(layout.stride()[0]) * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[1]) * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next T
inc_next[2] = conv_sign * (
int64_t(layout.stride()[2]) * problem_size.dilation_d
- (problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next K
inc_next[3] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * int64_t(problem_size.T - 1) * layout.stride()[2] * problem_size.dilation_d
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv3d DGRAD Filter (w) iterator
struct Conv3dDgradFilterIteratorOptimizedParams {
using Layout = layout::TensorNDHWC;
Layout layout;
int TRS;
int filter_k_delta;
int64_t inc_next_strided; // offset in units of bytes to next K coordinate within tile
int64_t inc_next_trs; // offset in units of bytes to next TRS position
int64_t inc_next_k; // offset in units of bytes to next K position in subsequent tile
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dDgradFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dDgradFilterIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout), TRS(problem_size.T * problem_size.R * problem_size.S) {
TRACE_CONV_INITIALIZERS("conv3d_dgrad", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
inc_next_strided = ((int64_t)layout.stride()[3] * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_trs =
( (int64_t)layout.stride()[0]
- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[3]
) * element_size_bits / 8;
inc_next_k =
(
threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[3]
- (problem_size.T * problem_size.R * problem_size.S - 1) * (int64_t)layout.stride()[0]
- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[3]
) * element_size_bits / 8;
filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/// Parameters object for Conv3d WGRAD OutputGradient iterator
struct Conv3dWgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNDHWC;
using LongIndex = typename Layout::LongIndex;
Layout layout;
  int NZPQ;            // precomputed product of N*Z*P*Q for clearing predicates
int ZPQ; // product of Z*P*Q
unsigned zpq_mul; // precomputed quantities for fast computation of div/% by ZPQ
unsigned zpq_shr; // in device code.
int PQ; // product of P*Q
unsigned pq_mul; // precomputed quantities for fast computation of div/% by PQ
unsigned pq_shr; // in device code.
unsigned q_mul; // precomputed quantities for fast computation of div/% by Q
unsigned q_shr; // in device code.
LongIndex offset_next_strided; // offset in units of bytes to next nzpq coordinate within tile
LongIndex offset_next_contiguous; // offset in units of bytes to next k coordinate within tile
LongIndex inc_next_nzpq; // offset in units of bytes to next nzpq position in subsequent tile
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits,
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
): layout(layout) {
TRACE_CONV_INITIALIZERS("conv3d_wgrad", "output_gradient",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
    // Incremental offsets in units of bytes (number of elements) * element_size_bits / 8
offset_next_strided = (threadmap_delta.strided() * (int64_t)layout.stride()[0])
* element_size_bits / 8;
offset_next_contiguous = (threadmap_delta.contiguous())
* element_size_bits / 8;
inc_next_nzpq = (threadblock_shape.column() * problem_size.split_k_slices * (int64_t)layout.stride()[0])
* element_size_bits / 8;
// Precompute several quantities for fast modulo arithmetic.
NZPQ = problem_size.N * problem_size.Z * problem_size.P * problem_size.Q;
ZPQ = problem_size.Z * problem_size.P * problem_size.Q;
find_divisor(zpq_mul, zpq_shr, ZPQ);
PQ = problem_size.P * problem_size.Q;
find_divisor(pq_mul, pq_shr, PQ);
find_divisor(q_mul, q_shr, problem_size.Q);
}
};
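// Descriptive note (added comment): the (mul, shr) pairs produced by find_divisor() above are
// the values consumed by fast_divmod() in
// Conv3dWgradOutputGradientTileAccessIteratorOptimized::at_() to split a linear nzpq offset
// into (n, z, p, q) without a per-access integer division.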
/// Parameters object for Conv3d WGRAD Activation Tile Access Iterator
struct Conv3dWgradActivationIteratorOptimizedParams {
using Layout = layout::TensorNDHWC;
Layout layout;
int RSC; // product of R*S*C
unsigned rsc_mul; // precomputed quantities for fast computation of div/% by RSC
unsigned rsc_shr; // in device code.
int SC; // product of S*C
unsigned sc_mul; // precomputed quantities for fast computation of div/% by SC
unsigned sc_shr; // in device code.
unsigned c_mul; // precomputed quantities for fast computation of div/% by C
unsigned c_shr; // in device code.
int ZPQ; // product of Z*P*Q
unsigned zpq_mul; // precomputed quantities for fast computation of div/% by ZPQ
unsigned zpq_shr; // in device code.
int PQ; // product of P*Q
unsigned pq_mul; // precomputed quantities for fast computation of div/% by PQ
unsigned pq_shr; // in device code.
unsigned q_mul; // precomputed quantities for fast computation of div/% by Q
unsigned q_shr; // in device code.
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv3dWgradActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv3dWgradActivationIteratorOptimizedParams(
Conv3dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits,
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
): layout(layout) {
TRACE_CONV_INITIALIZERS("conv3d_wgrad", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
// Precompute several quantities for fast modulo arithmetic.
RSC = problem_size.R * problem_size.S * problem_size.C;
find_divisor(rsc_mul, rsc_shr, RSC);
SC = problem_size.S * problem_size.C;
find_divisor(sc_mul, sc_shr, SC);
find_divisor(c_mul, c_shr, problem_size.C);
ZPQ = problem_size.Z * problem_size.P * problem_size.Q;
find_divisor(zpq_mul, zpq_shr, ZPQ);
PQ = problem_size.P * problem_size.Q;
find_divisor(pq_mul, pq_shr, PQ);
find_divisor(q_mul, q_shr, problem_size.Q);
}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 18,249 | C | 34.854617 | 110 | 0.633131 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
      Partial specializations for threadblock::Mma operations targeting depthwise-related SIMT instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/warp/mma_depthwise_simt.h"
#include "cutlass/gemm/threadblock/mma_pipelined.h"
#include "cutlass/gemm/threadblock/mma_singlestage.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "cutlass/conv/threadblock/depthwise_mma_base.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h"
#include "cutlass/arch/cache_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
namespace detail {
//
// Convert a WarpShapeM, which is the whole tile of elements, into the number of elements (2D) held by
// each partition within the warp.
// The goal is for each thread's tile of elements to be as square as
// possible for performance (4x4 will be faster than 2x8).
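// For example (values taken from the specializations below): WarpShapeM = 16 with
// WarpNumThreadsM = 4 yields kP = 2 and kQ = 2, i.e. each thread holds a 2x2 patch,
// since kP * kQ * WarpNumThreadsM = 2 * 2 * 4 = 16.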
template<int WarpShapeM, // The number of elements (1D) contained in the entire warp
int WarpNumThreadsM> // The number of partitions within the warp
struct SimtWarpShape {
// kP * kQ * WarpNumThreadsM = WarpShapeM
// If needed, enable more specializations.
};
template <>
struct SimtWarpShape<4, 4> {
static constexpr int kP = 1;
static constexpr int kQ = 1;
};
template <>
struct SimtWarpShape<4, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 1;
};
template <>
struct SimtWarpShape<4, 1> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<8, 1> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<8, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<8, 4> {
static constexpr int kP = 1;
static constexpr int kQ = 2;
};
template <>
struct SimtWarpShape<16, 1> {
static constexpr int kP = 4;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<16, 2> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<16, 4> {
static constexpr int kP = 2;
static constexpr int kQ = 2;
};
template <int WarpNumThreadsM>
struct SimtWarpShape<25, WarpNumThreadsM> {
static_assert(WarpNumThreadsM == 1, "WarpShapeM cannot be evenly split across threads");
static constexpr int kP = 5;
static constexpr int kQ = 5;
};
template <>
struct SimtWarpShape<32, 1> {
static constexpr int kP = 4;
static constexpr int kQ = 8;
};
template <>
struct SimtWarpShape<32, 2> {
static constexpr int kP = 4;
static constexpr int kQ = 4;
};
template <>
struct SimtWarpShape<32, 4> {
static constexpr int kP = 2;
static constexpr int kQ = 4;
};
} // namespace detail
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_ = 0,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeB_ = 0,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DepthwiseMmaCoreWithLaneAccessSize;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of threadblock-scoped output tile
typename ThreadBlockOutputShape,
/// Shape of filter shape per threadblock
typename FilterShape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_ = 0,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeB_ = 0,
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock
typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA,
/// per-element transformation for elements of B
ComplexTransform TransformB,
bool IsComplex
>
struct DepthwiseMmaCoreWithLaneAccessSize<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, -1, -1, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> : cutlass::gemm::threadblock::DefaultMmaCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: column-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access (a value of -1 indicates the default)
int kLaneAccessSizeA_,
/// Size of a warp-scoped per thread access (a value of -1 indicates the default)
int kLaneAccessSizeB_,
/// Operation performed by GEMM
typename Operator_>
struct DepthwiseMmaCoreWithLaneAccessSize<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
kLaneAccessSizeB_,
2,
Operator_> : public cutlass::gemm::threadblock::DefaultMmaCore<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
2,
Operator_> {
using Base = cutlass::gemm::threadblock::DefaultMmaCore<Shape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
2,
Operator_>;
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const kLaneAccessSizeA = kLaneAccessSizeA_;
static int const kLaneAccessSizeB = kLaneAccessSizeB_;
// Divisibility requirements
static_assert( kLaneAccessSizeA > 0 && kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger than ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = typename Base::WarpCount;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
static int const kElementsPerAccess = 1;
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory are same as base class
//
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = cutlass::gemm::threadblock::detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = kLaneAccessSizeA / sizeof_bits<ElementA>::value;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
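// Illustrative arithmetic (hypothetical instantiation, not a requirement): with
// kLaneAccessSizeA_ = 128 and a 32-bit ElementA, numElementsA = 128 / 32 = 4,
// so LaneM = min(4, ThreadTileM).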
static int const kPaddingM = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value);
static int const kPaddingN = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value);
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// These are already capped by the thread tile size above.
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::gemm::threadblock::MmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of threadblock-scoped output tile (concept: TensorNHWCShape)
typename ThreadBlockOutputShape_,
/// Shape of filter shape per threadblock
typename FilterShape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_,
/// Number of stages
int Stages_,
/// Operation performed by GEMM
typename Operator_>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_,
ThreadBlockOutputShape_,
FilterShape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
128,
Stages_,
Operator_> {
using Shape = Shape_;
using FilterShape = FilterShape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const kLaneAccessSizeB = 128;
// Divisibility requirements
static_assert( kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger than ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = cutlass::gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
1
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
// For Gmem load
static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value;
static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, 1>, // Set kStrided = 1 because the activation shape is a runtime value.
kThreads,
kElementsPerAccessA
>;
/// ThreadMap of iterator A
using SmemThreadMapA = IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<1, Shape::kN>, // set kRow to 1 because it is a runtime value
ElementA,
SmemLayoutA,
0,
SmemThreadMapA, // was IteratorThreadMapA
true // Dynamic iterations.
>;
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, FilterShape::kCount>,
kThreads,
kElementsPerAccessB
>;
/// ThreadMap of iterator B used for the shared memory tile (no transpose is applied here)
using SmemThreadMapB = IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<FilterShape::kCount, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB, // was IteratorThreadMapB
false // static iterations.
>;
//
// Warp-level matrix multiply operator
//
// Groups per thread
// Multi-byte element types (e.g. Fp32, Fp16): 2 groups
// Single-byte element types (e.g. Int8): 4 groups
static const int GroupsPerThread = sizeof(ElementB) > 1 ? 2 : 4;
// Define the warp-level op
static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize);
static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
// Get output P, Q per thread
static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP;
static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ;
static const int LaneLayout = 1;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN);
// Define the output tile computed by each thread
using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>;
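// Worked example (hypothetical instantiation): with ElementB = float and
// WarpShape = <16, 32, ...>, GroupsPerThread = 2, WarpNumThreadsN = min(32 / 2, 32) = 16,
// WarpNumThreadsM = 32 / 16 = 2, TileP = 2 and TileQ = 4 (from SimtWarpShape<16, 2>),
// and LaneN = min(128 / 32, 32 / 16) = 2, giving a per-thread output tile of
// TensorNHWCShape<1, 2, 4, 2>.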
// Fetch the channel dimension with the same access size
static const int LaneM = LaneN;
// No paddings
static int const kPaddingM = 0;
static int const kPaddingN = 0;
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// These are already capped by the thread tile size above.
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
FilterShape, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<>
ThreadBlockOutputShape_, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
IteratorThreadMapA,
IteratorThreadMapB,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: row-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of threadblock-scoped output tile (concept: TensorNHWCShape)
typename ThreadBlockOutputShape_,
/// Shape of filter shape per threadblock
typename FilterShape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a warp-scoped per thread access
int kLaneAccessSizeA_,
/// Number of stages
int Stages_,
/// Operation performed by GEMM
typename Operator_,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_,
/// Activation Shape loaded by threadblock
typename ActivationShape_>
struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_,
ThreadBlockOutputShape_,
FilterShape_,
WarpShape_,
cutlass::gemm::GemmShape<1, 1, 1>,
ElementA_,
layout::RowMajor,
ElementB_,
layout::ColumnMajor,
ElementC_,
LayoutC_,
arch::OpClassSimt,
kLaneAccessSizeA_,
128,
Stages_,
Operator_,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape_,
DilationShape_,
ActivationShape_> {
using Shape = Shape_;
using FilterShape = FilterShape_;
using WarpShape = WarpShape_;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::RowMajor;
using ElementB = ElementB_;
using LayoutB = layout::ColumnMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
using StrideShape = StrideShape_;
using DilationShape = DilationShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using ActivationShape = ActivationShape_;
static int const kLaneAccessSizeB = 128;
// Divisibility requirements
static_assert( kLaneAccessSizeB > 0,
"Size of a warp-scoped per thread access should be larger than ZERO" );
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = cutlass::gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
1
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
// For Gmem load
static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value;
static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value;
//
// Shared memory layouts
//
using SmemLayoutA = layout::RowMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<ActivationShape::kC, ActivationShape::kNHW>,
kThreads,
kElementsPerAccessA
>;
/// ThreadMap of iterator A
using SmemThreadMapA = IteratorThreadMapA;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<ActivationShape::kNHW, ActivationShape::kC>,
ElementA,
SmemLayoutA,
0,
SmemThreadMapA, // was IteratorThreadMapA
false // static iterations.
>;
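// Note: unlike the runtime-shaped specialization above, ActivationShape is a compile-time
// constant on the fixed stride/dilation path, so this shared memory iterator can use
// static iteration counts.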
/// ThreadMap of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, FilterShape::kCount>,
kThreads,
kElementsPerAccessB
>;
/// ThreadMap of iterator B used for the shared memory tile (no transpose is applied here)
using SmemThreadMapB = IteratorThreadMapB;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv<
MatrixShape<FilterShape::kCount, Shape::kN>,
ElementB,
SmemLayoutB,
0,
SmemThreadMapB, // was IteratorThreadMapB
false // static iterations.
>;
//
// Warp-level matrix multiply operator
//
// Groups per thread
// Multi-byte element types (e.g. Fp32, Fp16): 2 groups
// Single-byte element types (e.g. Int8): 4 groups
static const int GroupsPerThread = sizeof(ElementB) > 1 ? 2 : 4;
// Define the warp-level op
static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize);
static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN;
static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP;
static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = 1;
static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value;
static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN);
// Define the output tile computed by each thread
using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>;
// Fetch the channel dimension with the same access size
static const int LaneM = LaneN;
// No paddings
static int const kPaddingM = 0;
static int const kPaddingN = 0;
static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN),
"Padding must be divisible by Lane");
// These are already capped by the thread tile size above.
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
FilterShape, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width>
ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<>
ThreadBlockOutputShape, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
IteratorAlgorithm::kFixedStrideDilation, /// Iterator algo type
StrideShape, /// Stride ( MatrixShape<Height, Width> )
DilationShape, /// Dilation ( MatrixShape<Height, Width> )
ActivationShape /// Activation Shape loaded by threadblock
>;
/// Policy used to define MmaPipelined
using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy<
MmaWarpSimt,
MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts
MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts
IteratorThreadMapA,
IteratorThreadMapB,
WarpCount::kK
>;
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
| 36,697 | C | 37.50787 | 142 | 0.619696 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradOutputGradientTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_k_[ThreadMap::Iterations::kContiguous];
int offset_nzpq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize filter_k for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
filter_k_[c] = threadblock_offset.row() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
// initialize n, z, p, q offsets for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] = threadblock_offset.column() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_nzpq_) in GEMM-A by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] += Shape::kColumn * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the output gradient tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int nzpq = offset_nzpq_[iteration_strided_];
int n = nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
int residual = nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
int z = residual / (problem_size_.P * problem_size_.Q);
residual = residual % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
return TensorCoord(n, z, p, q, filter_k_[iteration_contiguous_]);
}
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.d() < problem_size_.Z &&
coord.h() < problem_size_.P &&
coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
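// e.g. with a 16-bit element type the access covers 128 / 16 = 8 elements,
// so the checked extent must be a multiple of 8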
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,821 | C | 31.91791 | 105 | 0.656502 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/threadblock_swizzle.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements several possible threadblock-swizzling functions mapping blockIdx to
Convolution problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
static int get_strided_dgrad_tile_m(
cutlass::conv::Conv2dProblemSize const &problem_size,
int tile_size_m) {
// CTAs in M dimension per starting filter position
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, tile_size_m);
// Inflate number of CTAs in M dimension to cover every starting filter position, even those that
// may fall out of valid MMA (Dy * w) but are needed to apply epilogue (beta * Dx_source)
// and point-wise fusion
int tile_m = tile_m_per_filter * int(problem_size.stride().product());
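// For example, with stride = {2x2} there are four starting filter positions, so the number of
// CTAs along M is inflated by a factor of stride.product() = 4 relative to tile_m_per_filter.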
// There is a possible performance optimization here that yields up to a 2x speedup over the current
// CUTLASS strided dgrad performance for stride > filter (i.e., stride={2x2} and filter={1x1})
//
// * Optimization *
// Only launch CTAs in the M dimension that contribute to a row of the Dx output
//
//
// * Constraints *
// (A) stride <= filter, for example, stride={2x2} and filter={3x3}:
//    - (A.1): There are no constraints for this case and the optimization does
//             not affect this case's functionality or performance.
// (B) stride > filter, for example, stride={2x2} and filter={1x1}:
// - (B.1): Dx output tensor should be zero initialized
// - (B.2): The kernel epilogue cannot apply beta. Thus, beta should be zero
return tile_m;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for strided dgrad convolution
struct StridedDgradHorizontalThreadblockSwizzle :
public gemm::threadblock::GemmHorizontalThreadblockSwizzle {
using Base = gemm::threadblock::GemmHorizontalThreadblockSwizzle;
CUTLASS_HOST_DEVICE
StridedDgradHorizontalThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
gemm::GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) const {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
// compute number of tiles in m dimension
int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m());
// compute number of tiles in n dimension
int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n();
return gemm::GemmCoord(
tile_m,
tile_n,
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape())
private:
using Base::get_tiled_shape;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for strided dgrad convolution
template <int N = 1>
struct StridedDgradIdentityThreadblockSwizzle :
public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> {
using Base = gemm::threadblock::GemmIdentityThreadblockSwizzle<N>;
CUTLASS_HOST_DEVICE
StridedDgradIdentityThreadblockSwizzle() { }
/// Returns the shape of the problem in units of logical tiles
/// For ImplicitGemmConvolution Conv2d problem size: conv_operator(NPQK, NHWC, KRSC)
CUTLASS_HOST_DEVICE
gemm::GemmCoord get_tiled_shape(
cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) const {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
// compute number of tiles in m dimension
int tile_m = get_strided_dgrad_tile_m(problem_size, tile_size.m());
// compute number of tiles in n dimension
int tile_n = (implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n();
return gemm::GemmCoord(
tile_m,
tile_n,
split_k_slices);
}
/// Returns the shape of the problem in units of logical tiles
/// For GEMM problem size (MxNxK) (Do not use base class get_tiled_shape())
private:
using Base::get_tiled_shape;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock swizzling function for GEMMs
template <int N = 1, int Output_N = 1, int Output_P = 1, int Output_Q = 1>
struct DepthwiseDirect2dConvIdentityThreadblockSwizzle
: public gemm::threadblock::GemmIdentityThreadblockSwizzle<N> {
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvIdentityThreadblockSwizzle() {}
/// Returns the shape of the problem in units of logical tiles
CUTLASS_HOST_DEVICE
gemm::GemmCoord get_tiled_shape(cutlass::conv::Operator conv_operator,
cutlass::conv::Conv2dProblemSize const &problem_size,
gemm::GemmCoord tile_size,
int split_k_slices) const {
gemm::GemmCoord implicit_gemm_problem_size =
cutlass::conv::implicit_gemm_problem_size(conv_operator, problem_size);
return gemm::GemmCoord(1,
(implicit_gemm_problem_size.n() + tile_size.n() - 1) / tile_size.n(),
split_k_slices);
}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
| 8,050 | C | 40.5 | 100 | 0.643975 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity
>
class Conv3dDgradOutputGradientTileAccessIteratorOptimized {
public:
static_assert(StrideSupport_ == conv::StrideSupport::kUnity,
"Only unit-stride dgrad is supported at this time.");
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
using Coord3D = Coord<3>;
static int const kAccessesPerVector = 1;
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dDgradOutputGradientIteratorOptimizedParams;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (t, r, s)
int filter_t_;
int filter_r_;
int filter_s_;
int filter_k_;
Index masks_[ThreadMap::Iterations::kStrided][3];
public:
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorOptimized(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
filter_k_(0),
filter_t_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_d[ThreadMap::Iterations::kStrided];
int offset_h[ThreadMap::Iterations::kStrided];
int offset_w[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_ndhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
// The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_ndhw / (problem_size_.D * problem_size_.H * problem_size_.W);
// int residual = offset_ndhw % (problem_size_.D * problem_size_.H * problem_size_.W);
//
//
// offset_d[s] = residual / (problem_size_.H * problem_size_.W);
// residual = residual % (problem_size_.H * problem_size_.W);
//
// offset_h[s] = residual / problem_size_.W;
// offset_w[s] = residual % problem_size_.W;
//
int residual;
// input: (ndhw offset) output: (n offset and residual (dhw offset))
params_.dhw_divmod(offset_n[s], residual, offset_ndhw);
// input: (dhw offset) output: (d offset and residual (hw offset))
params_.hw_divmod(offset_d[s], residual, residual);
// input: (hw offset) output: (h offset and residual (w offset))
params_.w_divmod(offset_h[s], offset_w[s], residual);
TensorCoord coord = at_(offset_n[s], offset_d[s], offset_h[s], offset_w[s], 0, 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
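// Build one predicate mask per strided iteration and per filter dimension: for
// strided iteration i, bit t of masks_[i][0], bit r of masks_[i][1], and bit s of
// masks_[i][2] record whether that filter coordinate maps to an in-bounds output
// gradient location for this thread's (n, d, h, w) offset.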
CUTLASS_PRAGMA_NO_UNROLL
for (int t = 0; t < problem_size_.T; ++t) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int t_ = t;
if (problem_size_.mode == Mode::kConvolution) {
t_ = problem_size_.T - 1 - t;
}
int z = offset_d[s_idx] + problem_size_.pad_d - t_ * problem_size_.dilation_d;
bool pred = (offset_n[s_idx] < problem_size_.N && z >= 0 && z < problem_size_.Z);
masks_[s_idx][0] |= (pred << t);
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int p = offset_h[s_idx] + problem_size_.pad_h - r_ * problem_size_.dilation_h;
bool pred = (p >= 0 && p < problem_size_.P);
masks_[s_idx][1] |= (pred << r);
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int q = offset_w[s_idx] + problem_size_.pad_w - s_ * problem_size_.dilation_w;
bool pred = (q >= 0 && q < problem_size_.Q);
masks_[s_idx][2] |= (pred << s);
}
}
if (filter_k_ >= problem_size.K) {
clear_mask();
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
/// Returns the coordinate in the output gradient tensor Dy corresponding to
// activation ndhw and filter position k, t, r, s
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int d, int h, int w, int t, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
t = problem_size_.T - 1 - t;
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int z = d + problem_size_.pad_d - t * problem_size_.dilation_d;
int p = h + problem_size_.pad_h - r * problem_size_.dilation_h;
int q = w + problem_size_.pad_w - s * problem_size_.dilation_w;
return TensorCoord(n, z, p, q, filter_k_);
}
/// Adds a pointer offset in units of bytes
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask_(bool clear) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
// We are using inline PTX assembly here to avoid a CUDA C++ compilation
// artifact in which control flow instructions are generated. Instead, our
// intent is to predicate the mov instructions.
#if defined(__CUDA_ARCH__)
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][0])
:
"r"((int)clear),
"r"(masks_[s][0])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][1])
:
"r"((int)clear),
"r"(masks_[s][1])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][2])
:
"r"((int)clear),
"r"(masks_[s][2])
);
#else
if (clear) {
masks_[s][0] = 0;
masks_[s][1] = 0;
masks_[s][2] = 0;
}
#endif
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
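// next_idx selects which precomputed increment (params_.inc_next[next_idx]) to apply below:
//   0 - only the filter s coordinate advanced
//   1 - r advanced (s wrapped around)
//   2 - t advanced (r and s wrapped around)
//   3 - the whole (t, r, s) footprint wrapped; advance to the next filter-k tile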
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
next_idx = 1;
if (filter_r_ == problem_size_.R) {
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
next_idx = 2;
}
else {
filter_t_ = 0;
next_idx = 3;
}
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 3) {
filter_k_ += params_.filter_k_delta;
}
clear_mask_(filter_k_ >= problem_size_.K);
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask() {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][0] = Mask(0);
masks_[s][1] = Mask(0);
masks_[s][2] = Mask(0);
}
}
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][0] & (Index(1) << filter_t_)) &&
(masks_[iteration_strided_][1] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][2] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradOutputGradientTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// This is specialized for unit stride
if (problem_size.stride() != Coord3D({1, 1, 1})) {
return Status::kErrorNotSupported;
}
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorNotSupported;
}
    // Limit on filter size: each predicate mask stores one bit per filter position in a 32-bit word
if (problem_size.T > 32 || problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,014 | C | 29.642857 | 114 | 0.58905 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dFpropActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dAnalyticParams<Layout>;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_z_[ThreadMap::Iterations::kStrided];
int offset_p_[ThreadMap::Iterations::kStrided];
int offset_q_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_nzpq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
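      // Decompose the linear output index nzpq into output coordinates n, z, p, q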
offset_n_[s] = offset_nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
int residual = offset_nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
offset_z_[s] = residual / (problem_size_.P * problem_size_.Q);
residual = residual % (problem_size_.P * problem_size_.Q);
offset_p_[s] = residual / problem_size_.Q;
offset_q_[s] = residual % problem_size_.Q;
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_c_ += Shape::kColumn * problem_size_.split_k_slices;
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int n = offset_n_[iteration_strided_];
int z = offset_z_[iteration_strided_];
int p = offset_p_[iteration_strided_];
int q = offset_q_[iteration_strided_];
int t = filter_t_;
int r = filter_r_;
int s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
t = (problem_size_.T - 1 - filter_t_);
r = (problem_size_.R - 1 - filter_r_);
s = (problem_size_.S - 1 - filter_s_);
}
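    // Map the output coordinate (z, p, q) and filter offset (t, r, s) to the input coordinate (d, h, w)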
int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, d, h, w, filter_c_);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.d() >= 0 && coord.d() < problem_size_.D &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W &&
coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr = reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,634 | C | 31.996575 | 118 | 0.647291 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorNCxHWx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropActivationTileAccessIteratorFewChannels {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kFewChannels;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kPositionsPerTile = Shape::kColumn;
static int const kAccessesPerVector = kElementsPerAccess / AccessType::kElements;
static bool const kUseFastDivmodPrologue = true;
static bool const kUseFastDivmodMainloop = true;
static int const kStrideH = 0;
static int const kStrideW = 0;
static int const kDilationH = 0;
static int const kDilationW = 0;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dFewChannelsParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int rsc_index_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_p_[ThreadMap::Iterations::kStrided];
int offset_q_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorFewChannels(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
rsc_index_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
rsc_index_ = (threadblock_offset.column() + thread_coord.contiguous());
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_npq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
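      // Decompose the linear output index npq into output coordinates n, p, q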
if (kUseFastDivmodPrologue) {
int residual = params_.divmod_Q.divmod(offset_q_[s], offset_npq);
offset_n_[s] = params_.divmod_P.divmod(offset_p_[s], residual);
}
else {
offset_n_[s] = offset_npq / (problem_size_.P * problem_size_.Q);
int residual = offset_npq % (problem_size_.P * problem_size_.Q);
offset_p_[s] = residual / problem_size_.Q;
offset_q_[s] = residual % problem_size_.Q;
}
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
rsc_index_ += kPositionsPerTile * problem_size_.split_k_slices;
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int n = offset_n_[iteration_strided_];
int p = offset_p_[iteration_strided_];
int q = offset_q_[iteration_strided_];
int rsc_index = rsc_index_ + iteration_vector_ * AccessType::kElements;
int r = 0;
int s = 0;
int c = 0;
if (kUseFastDivmodMainloop) {
int rs_index = params_.divmod_C.divmod(c, rsc_index);
r = params_.divmod_S.divmod(s, rs_index);
}
else {
c = (rsc_index % problem_size_.C);
int rs_index = (rsc_index / problem_size_.C);
s = (rs_index % problem_size_.S);
r = (rs_index / problem_size_.S);
}
if (problem_size_.mode == Mode::kConvolution) {
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
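    // A compile-time stride or dilation of zero selects the runtime value from the problem size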
int stride_h = kStrideH;
if (!kStrideH) {
stride_h = problem_size_.stride_h;
}
int stride_w = kStrideW;
if (!kStrideW) {
stride_w = problem_size_.stride_w;
}
int dilation_h = kDilationH;
if (!kDilationH) {
dilation_h = problem_size_.dilation_h;
}
int dilation_w = kDilationW;
if (!kDilationW) {
dilation_w = problem_size_.dilation_w;
}
int h = p * stride_h - problem_size_.pad_h + r * dilation_h;
int w = q * stride_w - problem_size_.pad_w + s * dilation_w;
return TensorCoord(n, h, w, c);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
bool in_bounds =
coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W &&
coord.c() < problem_size_.C;
return in_bounds;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
int32_t offset =
coord.n() * params_.stride_n +
coord.h() * params_.stride_h +
coord.w() * params_.stride_w +
coord.c();
AccessType const *ptr = reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorFewChannels &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (kDilationH && problem_size.dilation_h != kDilationH) {
return Status::kErrorInvalidProblem;
}
if (kDilationW && problem_size.dilation_w != kDilationW) {
return Status::kErrorInvalidProblem;
}
if (kStrideH && problem_size.stride_h != kStrideH) {
return Status::kErrorInvalidProblem;
}
if (kStrideW && problem_size.stride_w != kStrideW) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorNCxHWx<32>>::value) {
if (problem_size.C % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorNCxHWx<64>>::value) {
if (problem_size.C % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,529 | C | 30.939058 | 118 | 0.652181 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorNCxHWx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>,
conv::GroupMode GroupMode_ = conv::GroupMode::kNone
>
class Conv2dFpropActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static conv::GroupMode const kGroupMode = GroupMode_;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_c_;
int filter_r_;
int filter_s_;
int filter_c_init_;
int group_idx_offset_;
int channels_per_group_;
int crs_cnt_;
int crs_per_group_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_p_[ThreadMap::Iterations::kStrided];
int offset_q_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
crs_cnt_(0),
group_idx_offset_(0),
filter_c_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
if (kGroupMode != conv::GroupMode::kNone) {
filter_c_init_ = filter_c_;
channels_per_group_ = problem_size_.C / problem_size_.groups;
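      // Number of tile advances covering one group's filter positions and channels
      // before moving on to the next group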
crs_per_group_ = problem_size_.S * problem_size_.R * ((channels_per_group_ + Shape::kColumn - 1) / Shape::kColumn);
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_npq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
offset_n_[s] = offset_npq / (problem_size_.P * problem_size_.Q);
int residual = offset_npq % (problem_size_.P * problem_size_.Q);
offset_p_[s] = residual / problem_size_.Q;
offset_q_[s] = residual % problem_size_.Q;
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
if (kGroupMode != conv::GroupMode::kNone) {
++crs_cnt_;
}
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
if (kGroupMode == conv::GroupMode::kNone) {
filter_c_ += Shape::kColumn * problem_size_.split_k_slices;
} else {
if (crs_cnt_ == crs_per_group_) {
// moves to next group
crs_cnt_ = 0;
++group_idx_offset_;
filter_c_ = group_idx_offset_ * channels_per_group_ + filter_c_init_;
} else {
filter_c_ += Shape::kColumn * problem_size_.split_k_slices;
}
}
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int n = offset_n_[iteration_strided_];
int p = offset_p_[iteration_strided_];
int q = offset_q_[iteration_strided_];
int r = filter_r_;
int s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
r = (problem_size_.R - 1 - filter_r_);
s = (problem_size_.S - 1 - filter_s_);
}
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
int c = filter_c_ + iteration_vector_ * AccessType::kElements;
return TensorCoord(n, h, w, c);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W &&
coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr = reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorNCxHWx<32>>::value) {
if (problem_size.C % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorNCxHWx<64>>::value) {
if (problem_size.C % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,953 | C | 31.894895 | 121 | 0.648407 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/depthwise_direct_conv_params.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Shape_,
typename OutputTileShape_,
typename StrideShape_,
typename DilationShape_,
typename ActivationShape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> >
class DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation {
public:
//
// Types
//
using Shape = Shape_;
using OutputTileShape = OutputTileShape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
  // Compile-time values of stride, dilation, and activation shape
using StrideShape = StrideShape_;
using DilationShape = DilationShape_;
using ActivationShape = ActivationShape_;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static int const kActivationSize = ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess * ThreadMap::kThreads *
sizeof_bits<Element>::value / 8;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1");
static_assert(OutputTileShape::kN == 1, "Require OutputTileShape::kN == 1");
static_assert(OutputTileShape::kC == Shape::kColumn, "Require OutputTile shape == channels per threadblock");
//
// Parameters structure
//
using Params = Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams<Layout>;
private:
Conv2dProblemSize const &problem_size_;
Params const ¶ms_;
char const *pointer_;
// Base channels for current threadblock
int base_c_;
// Base activation index for current threadblock
int offset_intial_npq_;
// Base activation coord for current threadblock
TensorCoord activatioin_base_;
  // Initial thread position
  int offset_initial_hwc_;
  // Current load instruction index for this thread
  int iterator_load_;
  // Current linear loading position of this thread within the activation tile
  int iterator_hwc_;
  // Whether the activation batch coordinate N is inside the tensor
  bool valid_n_;
public:
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset =
MatrixCoord()
)
: params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
offset_intial_npq_(threadblock_offset.row()),
offset_initial_hwc_(thread_idx),
iterator_load_(0) {
base_c_ = threadblock_offset.column();
set_iteration_index(0);
set_activation_coord(offset_intial_npq_);
}
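  /// Computes the base activation coordinate of this threadblock's output tile from its
  /// linear output offset, and records whether its batch index is in range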
CUTLASS_HOST_DEVICE
void set_activation_coord(int offset_npq) {
int offset_inital_n, offset_inital_p, offset_inital_q;
int residual;
params_.pq_divmod(offset_inital_n, residual, offset_npq);
params_.q_divmod(offset_inital_p, offset_inital_q, residual);
int base_n = offset_inital_n;
int base_h =
offset_inital_p * OutputTileShape::kH * StrideShape::kRow - problem_size_.pad_h;
int base_w =
offset_inital_q * OutputTileShape::kW * StrideShape::kColumn - problem_size_.pad_w;
activatioin_base_ = TensorCoord(base_n, base_h, base_w, base_c_);
valid_n_ = activatioin_base_.n() < problem_size_.N;
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(
problem_size,
layout,
{Shape::kRow, Shape::kColumn},
{OutputTileShape::kN, OutputTileShape::kH, OutputTileShape::kW, OutputTileShape::kC},
kActivationSize);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iterator_hwc_ = offset_initial_hwc_ + index * ThreadMap::kThreads;
iterator_load_ = index;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// Go to next threadblock
offset_intial_npq_ += problem_size_.split_k_slices;
set_iteration_index(0);
set_activation_coord(offset_intial_npq_);
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
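    // Decode this thread's linear position into (h, w, c) within the threadblock's activation tile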
    int c = iterator_hwc_ % ThreadMap::Detail::ShapeVec::kContiguous;
    int next = iterator_hwc_ / ThreadMap::Detail::ShapeVec::kContiguous;
int h = next / ActivationShape::kW;
int w = next % ActivationShape::kW;
c = c * AccessType::kElements;
return activatioin_base_ + TensorCoord(0, h, w, c);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
bool valid_c = coord.c() < problem_size_.C;
bool valid_h = coord.h() >= 0 && coord.h() < problem_size_.H;
bool valid_w = coord.w() >= 0 && coord.w() < problem_size_.W;
return valid_n_ ? valid_c & valid_h & valid_w : 0;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
AccessType const *ptr =
reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation &operator++() {
++iterator_load_;
iterator_hwc_ += ThreadMap::kThreads;
if (iterator_load_ < ThreadMap::Iterations::kCount) {
return *this;
}
iterator_load_ = 0;
iterator_hwc_ = offset_initial_hwc_;
return *this;
}
/// Determines the activation size loaded by iterator
CUTLASS_HOST_DEVICE
int get_load_size() {
return kActivationSize;
}
/// Determines the iterations needed
CUTLASS_HOST_DEVICE
int get_iteration_num() {
return ThreadMap::Iterations::kCount;
}
/// Determines whether the Depthwise fprop can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check stride and dilation constraint
if (problem_size.stride_h != StrideShape::kRow || problem_size.stride_w != StrideShape::kColumn) {
return Status::kErrorInvalidProblem;
}
if (problem_size.dilation_h != DilationShape::kRow || problem_size.dilation_w != DilationShape::kColumn) {
return Status::kErrorInvalidProblem;
}
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,747 | C | 33.120635 | 122 | 0.669861 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dWgradOutputGradientTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_k_[ThreadMap::Iterations::kContiguous];
int offset_npq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize filter_k for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
filter_k_[c] = threadblock_offset.row() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
// initialize n, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] = threadblock_offset.column() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_npq_) in GEMM-A by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] += Shape::kColumn * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the output gradient tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int npq = offset_npq_[iteration_strided_];
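    // Decompose the linear GEMM-K offset npq into output gradient coordinates n, p, q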
int n = npq / (problem_size_.P * problem_size_.Q);
int residual = npq % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
int k = filter_k_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements;
return TensorCoord(n, p, q, k);
}
/// Returns true if the current coordinate is within the output gradient tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() < problem_size_.P &&
coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,043 | C | 33.651341 | 105 | 0.666482 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_pipelined.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
    /// Layout of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_ = NumericArrayConverter<
typename SmemIteratorA_::Element,
typename IteratorA_::Element,
IteratorA_::Fragment::kElements>,
    /// Transformation applied to B operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class DepthwiseFpropPipelined : public gemm::threadblock::MmaBase<Shape_, Policy_, 2> {
public:
///< Base class
using Base = gemm::threadblock::MmaBase<Shape_, Policy_, 2>;
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // statically assert kStages for MmaPipelined is two (double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
private:
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
protected:
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DepthwiseFpropPipelined(
typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx ///< ID of each thread within a warp
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum, ///< source accumulator tile
int gemm_k_iterations_per_channel = 0, ///< number of iterations per channel
TransformA transform_A = TransformA(), ///< transformation applied to A fragment
TransformB transform_B = TransformB()) { ///< transformation applied to B fragment
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentA tb_frag_A;
FragmentB tb_frag_B;
tb_frag_A.clear();
tb_frag_B.clear();
    // The last kblock is loaded in the prologue
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
this->smem_iterator_A_.store(transform_A(tb_frag_A));
this->smem_iterator_B_.store(transform_B(tb_frag_B));
++this->smem_iterator_A_;
++this->smem_iterator_B_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentB warp_frag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
Operator warp_mma;
int smem_write_stage_idx = 1;
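    // The prologue above stored the first threadblock tile into stage 0 of the double buffer,
    // so the next shared memory store targets stage 1.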
// Depthwise specific
int channel_start_index = 0;
int rs_plane_idx = 0;
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
    // shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
if(rs_plane_idx == gemm_k_iterations_per_channel - 1){
        // Reset iteration index.
iterator_B.set_iteration_index(0);
}
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group
// as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A(tb_frag_A));
this->smem_iterator_B_.store(transform_B(tb_frag_B));
__syncthreads();
if(rs_plane_idx == gemm_k_iterations_per_channel - 1){
// Move to next set of filter groups.
channel_start_index += Base::kWarpGemmIterations;
}
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A_.set_kgroup_index(channel_start_index + (warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index(channel_start_index + (warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
}
warp_mma(accum, warp_frag_A[warp_mma_k % 2],
warp_frag_B[warp_mma_k % 2], accum);
}
rs_plane_idx = (rs_plane_idx == gemm_k_iterations_per_channel - 1) ? 0: (rs_plane_idx + 1);
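      // rs_plane_idx cycles through 0 .. gemm_k_iterations_per_channel - 1; e.g. (illustrative)
      // with 3 filter positions per channel it repeats 0, 1, 2, 0, 1, 2, ...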
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 12,744 | C | 36.818991 | 126 | 0.631905 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_mma_base.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Template for a direct convolution threadblock-scoped depthwise kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Policy object describing MmaTensorOp
template <
/// Warp-level GEMM operator (concept: gemm::warp::Mma)
typename Operator_,
/// Padding used for A operand in shared memory (concept: MatrixShape)
typename SmemPaddingA_,
/// Padding used for B operand in shared memory (concept: MatrixShape)
typename SmemPaddingB_,
///
typename ThreadMapA_,
///
typename ThreadMapB_,
/// Number of partitions of K dimension of GEMM
int PartitionsK = 1>
struct DepthwiseDirectConvMmaPolicy {
/// Warp-level GEMM operator (concept: gemm::warp::MmaTensorOp or gemm::warp::MmaSimt)
using Operator = Operator_;
/// Padding used for A operand in shared memory
using SmemPaddingA = SmemPaddingA_;
/// Padding used for B operand in shared memory
using SmemPaddingB = SmemPaddingB_;
using ThreadMapA = ThreadMapA_;
using ThreadMapB = ThreadMapB_;
/// Number of partitions of K dimension
static int const kPartitionsK = PartitionsK;
};
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class DepthwiseDirectConvMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy = Policy_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = cutlass::gemm::
GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>;
  /// Number of warp-level GEMM operations
  /// kWarpGemmIterations may be either even or odd.
static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK);
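  // Illustrative sizing (assumed values): a 128x128x32 threadblock tile with 64x64x32 warp tiles
  // yields WarpCount = <2, 2, 1>; with Operator::Policy::MmaShape::kK = 8, kWarpGemmIterations = 4.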
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<1, // Not determined at compile-time :(
Shape::kN + Policy::SmemPaddingA::kRow>;
/// Shape of the B matrix operand in shared memory
using ShapeB = MatrixShape<Policy::ThreadMapB::StorageShape::kStrided +
Policy::SmemPaddingB::kRow, // filter_rs_size
Policy::ThreadMapB::StorageShape::kContiguous +
Policy::SmemPaddingB::kColumn>; // Tile N = 64?
public:
//
// Data members
//
    // Place the persistent B matrix ahead of the dynamically sized A matrix
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
/// Buffer for A operand
    /// Size is not determined at compile time -- used only to obtain a shared memory start address.
AlignedBuffer<typename Operator::ElementA, 1> operand_A;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() { return TensorRefA{operand_A.data(), LayoutA()}; }
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; }
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DepthwiseDirectConvMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,097 | C | 34.208696 | 100 | 0.638755 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of scale and bias vectors.
This iterator uses masks to guard out-of-bounds accesses.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorIterator
///
template <typename WarpShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for wgrad pitch-linear data.
///
template <typename WarpShape_, typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
Element_,
layout::PitchLinear> {
public:
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 1;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
static int const kIterations = WarpShape::kContiguous / 8;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<__half2, 2 * kIterations * kElementsPerAccess>;
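  // Illustrative sizing (assumed warp shape): WarpShape::kContiguous = 64 gives kIterations = 8,
  // so the fragment holds 8 interleaved (scale, bias) __half2 pairs.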
/// Parameters object is precomputed state and is host-constructible
using Params = Conv2dWgradActivationIteratorOptimizedParams;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
ConstPointer scale_pointer_;
ConstPointer bias_pointer_;
/// Size of tensor
Conv2dProblemSize problem_size_;
int32_t thread_offset_;
// Channel dimension in contiguous dimension stays constant for each gemm_iteration_k
int32_t filter_c_[kIterations];
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
problem_size_(problem_size),
scale_pointer_(scale_pointer),
bias_pointer_(bias_pointer) {
thread_offset_ = threadblock_offset.contiguous() + (thread_id % 32) / 4;
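    // (thread_id % 32) / 4 maps each quad of lanes (0-3, 4-7, ...) to one of eight contiguous offsets.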
}
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Extent of tensor
Conv2dProblemSize const &problem_size,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Advances an iterator along logical dimensions of matrix in units of whole warp tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ += (WarpShape::kContiguous * tile_offset.contiguous());
CUTLASS_PRAGMA_UNROLL
for(int c = 0; c < kIterations; ++c) {
int rsc_offset = thread_offset_ + c * 8;
int residual, tmp;
params_.sc_divmod(tmp, residual, rsc_offset);
params_.c_divmod(tmp, filter_c_[c], residual);
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.fill(__float2half2_rn(0.0f));
__half2 *frag_ptr = reinterpret_cast<__half2 *>(&frag);
// load scale
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
__half,
sizeof(AccessType)
>(
frag_ptr[c * 2].x,
scale_pointer_ + filter_c_[c],
true
);
}
// load bias
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
__half,
sizeof(AccessType)
>(
frag_ptr[c * 2 + 1].x,
bias_pointer_ + filter_c_[c],
true
);
}
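    // The loops below replicate each loaded value into the .y half of its __half2, presumably so that
    // packed half2 math can apply the same scale/bias to a pair of elements.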
// duplicate scale
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
frag_ptr[c * 2].y = frag_ptr[c * 2].x;
}
// duplicate bias
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
frag_ptr[c * 2 + 1].y = frag_ptr[c * 2 + 1].x;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename WarpShape_,
typename Element_>
class PredicatedScaleBiasVectorIterator<WarpShape_,
Element_,
layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorIterator<
layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
using Fragment = typename UnderlyingIterator::Fragment;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedScaleBiasVectorIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Conv2dProblemSize const &problem_size, Layout const &layout)
: params_(problem_size, layout::TensorNHWC(0, 0, 0)){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Extent of tensor
Conv2dProblemSize const &problem_size,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, problem_size, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorIterator(
Params const ¶ms, ///< Precomputed parameters object
Conv2dProblemSize const &problem_size, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorIterator(params, problem_size,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
iterator_.load(frag);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 12,476 | C | 32.540322 | 100 | 0.639468 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (activation tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dWgradActivationTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
using Params = Conv2dWgradActivationIteratorOptimizedParams;
private:
Conv2dWgradActivationIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
  // Precomputed effective filter position (r,s) in contiguous dimension stays constant for each gemm_iteration_k
// required for npq -> nhw translation
int precomputed_filter_r_[ThreadMap::Iterations::kContiguous];
int precomputed_filter_s_[ThreadMap::Iterations::kContiguous];
// Channel dimension in contiguous dimension stays constant for each gemm_iteration_k
int filter_c_[ThreadMap::Iterations::kContiguous];
int offset_npq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dWgradActivationTileAccessIteratorOptimized(
Conv2dWgradActivationIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr))
{
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize r,s,c filter position for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for(int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int rsc_offset = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// filter_r_[c] = rsc_offset / (problem_size_.S * problem_size_.C);
// int residual = rsc_offset % (problem_size_.S * problem_size_.C);
//
// filter_s_[c] = residual / problem_size_.C;
// filter_c_[c] = residual % problem_size_.C;
int residual;
params_.sc_divmod(precomputed_filter_r_[c], residual, rsc_offset);
params_.c_divmod(precomputed_filter_s_[c], filter_c_[c], residual);
int r = precomputed_filter_r_[c];
int s = precomputed_filter_s_[c];
if (problem_size_.mode == Mode::kConvolution) {
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
precomputed_filter_r_[c] = -problem_size_.pad_h + r * problem_size_.dilation_h;
precomputed_filter_s_[c] = -problem_size_.pad_w + s * problem_size_.dilation_w;
}
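    // Illustrative walk-through (assumed sizes): S = 3, C = 64, pad = 1, dilation = 1, and
    // rsc_offset = 200 decompose to r = 1, s = 0, c = 8, giving effective offsets r = 0, s = -1.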
// initialize n, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] = threadblock_offset.row() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
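  /// The flat index decomposes into (vector, contiguous, strided) components; e.g. (illustrative)
  /// with kAccessesPerVector = 2 and Iterations::kContiguous = 1, index 5 maps to (1, 0, 2).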
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_npq_) in GEMM-B by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_npq_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the activation tensor x that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int r = precomputed_filter_r_[iteration_contiguous_];
int s = precomputed_filter_s_[iteration_contiguous_];
int c = filter_c_[iteration_contiguous_];
if (kAccessesPerVector > 1) {
      // This code section exists only to support non-128b alignment
      // Multiple accesses are issued to support non-128b alignment in the contiguous dimension
int wrap_c;
params_.c_divmod(wrap_c, c, c + iteration_vector_ * AccessType::kElements);
if (problem_size_.mode == Mode::kConvolution) {
s -= (problem_size_.dilation_w * wrap_c);
int wrap_s;
params_.s_divmod(wrap_s, s, params_.small_channel_conv_s_offset - s);
s = params_.small_channel_conv_s_offset - s;
r -= (problem_size_.dilation_h * wrap_s);
} else {
s += (problem_size_.dilation_w * wrap_c);
int wrap_s;
params_.s_divmod(wrap_s, s, s + problem_size_.pad_w);
s -= problem_size_.pad_w;
r += (problem_size_.dilation_h * wrap_s);
}
}
    // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// int n = offset_npq_[iteration_strided_] / (problem_size_.P * problem_size_.Q);
// int residual = offset_npq_[iteration_strided_] % (problem_size_.P * problem_size_.Q);
//
// int p = residual / problem_size_.Q;
// int q = residual % problem_size_.Q;
int residual, n, p, q;
params_.pq_divmod(n, residual, offset_npq_[iteration_strided_]);
params_.q_divmod(p, q, residual);
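    // Illustrative: with P = Q = 56, offset_npq = 3200 decomposes to n = 1, p = 1, q = 8.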
int h = p * problem_size_.stride_h + r;
int w = q * problem_size_.stride_w + s;
return TensorCoord(n, h, w, c);
}
/// Returns true if the current coordinate is within the activation tensor x
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dWgradActivationTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,520 | C | 34.779503 | 112 | 0.653906 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_filter_tile_access_iterator_direct_conv_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
template <typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> >
class DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static int const kFilterSize = ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess * ThreadMap::kThreads *
sizeof_bits<Element>::value / 8;
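  // Illustrative sizing (assumed thread map): 4 iterations x 8 elements per access x 128 threads of a
  // 16-bit element type stage 8192 bytes of filter data in shared memory.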
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Depthwise2dFpropDirectConvFilterIteratorParams<Layout>;
protected:
Conv2dProblemSize const &problem_size_;
Params const ¶ms_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_k_;
int offset_trs_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_k_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_trs_[s] = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout, {Shape::kRow, Shape::kColumn}, kFilterSize);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
    pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
    // Do nothing: the filter tile is persistent in shared memory (SMEM)
}
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = filter_k_ + iteration_vector_ * AccessType::kElements;
int trs = offset_trs_[iteration_strided_];
    return TensorCoord(k, trs, 0, 0);  // Treated as a 2-D matrix
}
  /// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K &&
coord.h() < Shape::kColumn;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
int64_t offset = coord.n();
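    // In convolution mode the filter taps along R*S are addressed in flipped (reversed) order;
    // cross-correlation uses the natural order.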
if (params_.is_convolution) {
offset += (Shape::kColumn - coord.h() - 1)* problem_size_.K;
} else {
offset += coord.h() * problem_size_.K;
}
return reinterpret_cast<AccessType const *>(pointer_ +
offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines the filter size loaded by iterator
CUTLASS_HOST_DEVICE
int get_load_size() {
return kFilterSize;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
    // check whether the runtime filter size matches the templated filter size.
if ((problem_size.R * problem_size.S) != Shape::kColumn) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,921 | C | 33.053435 | 118 | 0.672795 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropFilterTileAccessIteratorFewChannels {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kFewChannels;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kPositionsPerTile = Shape::kRow;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static bool const kUseFastDivmodPrologue = true;
static bool const kUseFastDivmodMainloop = true;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dFewChannelsParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int rsc_index_;
int offset_k_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorFewChannels(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
rsc_index_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
rsc_index_ = (threadblock_offset.row() + thread_coord.contiguous());
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
    pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
rsc_index_ += kPositionsPerTile * problem_size_.split_k_slices;
}
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int rsc_index = rsc_index_ + iteration_vector_ * AccessType::kElements;
int c = 0;
int s = 0;
int r = 0;
if (kUseFastDivmodMainloop) {
int rs_index = params_.divmod_C.divmod(c, rsc_index);
r = params_.divmod_S.divmod(s, rs_index);
}
else {
c = (rsc_index % problem_size_.C);
int rs_index = (rsc_index / problem_size_.C);
s = (rs_index % problem_size_.S);
r = (rs_index / problem_size_.S);
}
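    // Both paths recover (r, s, c) from the flattened index; e.g. (illustrative) with C = 4 and S = 3,
    // rsc_index = 29 yields c = 1, s = 1, r = 2.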
int k = offset_k_[iteration_strided_];
return TensorCoord(k, r, s, c);
}
  /// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
bool in_bounds =
coord.n() < problem_size_.K &&
coord.h() >= 0 &&
coord.h() < problem_size_.R &&
coord.c() < problem_size_.C;
return in_bounds;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
int32_t offset =
coord.n() * params_.stride_n +
coord.h() * params_.stride_h +
coord.w() * params_.stride_w +
coord.c();
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorFewChannels &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) {
if (problem_size.K % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) {
if (problem_size.K % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,314 | C | 31.12069 | 107 | 0.660726 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/implicit_gemm_pipelined.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator | MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_ = NumericArrayConverter<
typename SmemIteratorA_::Element,
typename IteratorA_::Element,
IteratorA_::Fragment::kElements>,
///
/// Transformation applied to A operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool
>
class ImplicitGemmPipelined : public gemm::threadblock::MmaBase<Shape_, Policy_, 2> {
public:
///< Base class
using Base = gemm::threadblock::MmaBase<Shape_, Policy_, 2>;
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // statically assert kStages for MmaPipelined is two (Double-buffered pipeline)
static_assert((Base::kStages==2), "MmaPipelined requires kStages set to value 2");
private:
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
protected:
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
ImplicitGemmPipelined(
typename Base::SharedStorage &shared_storage, ///< Shared storage needed for internal use by threadblock-scoped GEMM
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx ///< ID of each thread within a warp
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
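    // Hedged worked example (illustrative values, not taken from this file): with
    // Base::WarpCount = {kM = 2, kN = 2, kK = 1} and warp_idx = 3,
    //   warp_idx_mn = 3 % 4 = 3,  warp_idx_k = 3 / 4 = 0,
    //   warp_idx_m  = 3 % 2 = 1,  warp_idx_n  = 3 / 2 = 1,
    // i.e. this warp owns the (m = 1, n = 1) warp tile of the threadblock tile.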
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset({warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset({Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum, ///< source accumulator tile
int gemm_k_iterations_per_channel = 0, ///< number of iterations per channel
TransformA transform_A = TransformA(), ///< transformation applied to A fragment
TransformB transform_B = TransformB()) { ///< transformation applied to B fragment
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentA tb_frag_A;
FragmentB tb_frag_B;
tb_frag_A.clear();
tb_frag_B.clear();
    // The last kblock is loaded in the prologue
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
this->smem_iterator_A_.store(transform_A(tb_frag_A));
this->smem_iterator_B_.store(transform_B(tb_frag_B));
++this->smem_iterator_A_;
++this->smem_iterator_B_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentB warp_frag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
Operator warp_mma;
int smem_write_stage_idx = 1;
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing
    // shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
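    // Descriptive summary of the double-buffered schedule below (the code underneath
    // is authoritative):
    //   - every warp_mma_k step prefetches the *next* pair of warp fragments from
    //     shared memory;
    //   - at warp_mma_k == 0, the next threadblock tile is loaded from global memory;
    //   - at warp_mma_k == kWarpGemmIterations - 1, the staged global fragments are
    //     written to the other shared memory stage, followed by __syncthreads() and a
    //     swap of the circular-buffer write stage.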
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping to the k offset if this
        // is the last group in the tile.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A(tb_frag_A));
this->smem_iterator_B_.store(transform_B(tb_frag_B));
__syncthreads();
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
}
else {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
}
warp_mma(accum, warp_frag_A[warp_mma_k % 2],
warp_frag_B[warp_mma_k % 2], accum);
}
}
}
};
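// Hedged usage sketch (illustrative only): the concrete wiring is done by the
// implicit GEMM kernel templates, and the names Mma, iterator_A, iterator_B,
// warp_id, lane_id and gemm_k_iterations below are assumptions for the example.
//
//   __shared__ typename Mma::SharedStorage shared_storage;
//   Mma mma(shared_storage, threadIdx.x, warp_id, lane_id);
//   typename Mma::FragmentC accumulators;
//   accumulators.clear();
//   mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);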
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 12,174 | C | 36.928349 | 126 | 0.633728 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Extracts the host-params objects into non-template code.
*/
#pragma once
#define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
#include <fstream>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Params structure used for all Conv2d analytic tile iterators
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dAnalyticParams {
using Layout = Layout_;
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dAnalyticParams() { }
CUTLASS_HOST_DEVICE
Conv2dAnalyticParams(
Conv2dProblemSize const &, // unused; placeholder to match other Params interfaces.
Layout const &layout
): layout(layout) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Params structure used for Conv2d few-channels tile iterators
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFewChannelsParams {
using Layout = Layout_;
int32_t stride_w;
int32_t stride_h;
int32_t stride_n;
FastDivmod divmod_P;
FastDivmod divmod_Q;
FastDivmod divmod_S;
FastDivmod divmod_C;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFewChannelsParams() { }
CUTLASS_HOST_DEVICE
Conv2dFewChannelsParams(
    Conv2dProblemSize const &problem_size,
Layout const &layout
):
stride_w(int32_t(layout.stride()[0])),
stride_h(int32_t(layout.stride()[1])),
stride_n(int32_t(layout.stride()[2])),
divmod_P(problem_size.P),
divmod_Q(problem_size.Q),
divmod_S(problem_size.S),
divmod_C(problem_size.C)
{
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv2dDgradOutputGradientTileAccessIteratorAnalytic
struct Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams {
using Layout = layout::TensorNHWC;
Layout layout;
int tiled_rows_per_filter;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams() { }
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape
): layout(layout) {
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_shape.row());
tiled_rows_per_filter = tile_m_per_filter * threadblock_shape.row();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
CUTLASS_HOST_DEVICE
void TraceIteratorParams(
char const *conv_operator,
char const *operand,
int element_size_bits,
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
) {
#if !defined(__CUDA_ARCH__)
char const *fname = "conv_iterator_params.csv";
std::ifstream test(fname);
bool file_exists = test.is_open();
if (file_exists) {
test.close();
}
std::ofstream trace("conv_iterator_params.csv", std::ofstream::app);
if (!file_exists) {
trace
<< "Operator,Operand,ElementSize,CtaRows,CtaColumns,ThreadCount,AccessSize,"
<< "IterationsContiguous,IterationsStrided,DeltaContiguous,DeltaStrided\n";
}
trace << conv_operator << "," << operand << "," << element_size_bits << ","
<< threadblock_shape.row() << "," << threadblock_shape.column()
<< "," << thread_count << "," << access_size
<< "," << threadmap_iterations.contiguous() << "," << threadmap_iterations.strided()
<< "," << threadmap_delta.contiguous() << "," << threadmap_delta.strided() << "\n";
#endif
}
#define TRACE_CONV_INITIALIZERS(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta) \
TraceIteratorParams(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta);
#else
#define TRACE_CONV_INITIALIZERS(conv_op, operand, element_size, cta_shape, thread_count, access_size, iterations, delta) {}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFpropActivationIteratorOptimizedParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
template<>
struct Conv2dFpropActivationIteratorOptimizedParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
int64_t inc_next[3]; // {next S, next R, next C}
int filter_c_delta; // number of logical elements to add to filter_c_
int PQ; // product of P*Q
FastDivmod pq_divmod;
FastDivmod q_divmod;
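  // Note (descriptive; based on how FastDivmod is commonly used by the optimized
  // fprop activation iterator): a linear output offset npq is decomposed as
  //   n = npq / (P * Q), residual = npq % (P * Q)   via pq_divmod, then
  //   p = residual / Q,  q        = residual % Q    via q_divmod,
  // replacing integer division with multiply/shift sequences.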
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFpropActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dFpropActivationIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
PQ(problem_size.P * problem_size.Q),
pq_divmod(PQ),
q_divmod(problem_size.Q) {
TRACE_CONV_INITIALIZERS("conv2d_fprop", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1);
// next S
inc_next[0] = conv_sign * (
int64_t(layout.stride()[0]) * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[1]) * problem_size.dilation_h
- (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next C
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices;
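    // Hedged worked example (hypothetical sizes, not derived from any caller):
    // fp16 activations (element_size_bits = 16), NHWC with C = 64, W = 56, R = S = 3,
    // unit dilation, cross-correlation mode (conv_sign = +1), threadblock column = 64,
    // split_k_slices = 1, so layout.stride() = {64, 3584, ...}:
    //   inc_next[0] = (64 * 1) * 2                  =    128 bytes  (next S)
    //   inc_next[1] = (3584 - 2 * 64) * 2           =   6912 bytes  (next R)
    //   inc_next[2] = (64 - 2 * 3584 - 2 * 64) * 2  = -14464 bytes  (next C)
    //   filter_c_delta = 64 elements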
}
#if ENABLE_CONV2D_PARAMS_PRINT
/// Prints internal state.
CUTLASS_HOST_DEVICE
void print() {
auto stride = layout.stride();
printf(
"Conv2dFpropActivationIteratorOptimizedParams:\n"
" layout(w: %d, h: %d, n: %d)\n"
" inc_next[%ld, %ld, %ld]\n"
" filter_c_delta(%d) - PQ(%d)\n"
" pq_divmod(divisor: %d, multiplier: %u, shift_right: %u)\n"
" q_divmod(divisor: %d, multiplier: %u, shift_right: %u)\n",
stride[0], stride[1], stride[2],
inc_next[0], inc_next[1], inc_next[2],
filter_c_delta,
PQ,
pq_divmod.divisor,
pq_divmod.multiplier,
pq_divmod.shift_right,
q_divmod.divisor,
q_divmod.multiplier,
q_divmod.shift_right
);
}
#endif
};
/// Parameters structure used for Conv2dFpropActivationTileIteratorOptimized
template <int Interleaved_>
struct Conv2dFpropActivationIteratorOptimizedParams<layout::TensorNCxHWx<Interleaved_>> {
static int const kInterleaved = Interleaved_;
using Layout = layout::TensorNCxHWx<kInterleaved>;
Layout layout;
int64_t inc_next[3]; // {next S, next R, next C}
int filter_c_delta; // number of logical elements to add to filter_c_
int PQ; // product of P*Q
FastDivmod pq_divmod;
FastDivmod q_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFpropActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dFpropActivationIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout), PQ(problem_size.P * problem_size.Q), pq_divmod(PQ), q_divmod(problem_size.Q) {
TRACE_CONV_INITIALIZERS("conv2d_fprop", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1);
// next S
inc_next[0] = conv_sign * (kInterleaved * problem_size.dilation_w) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
int64_t(layout.stride()[0]) * problem_size.dilation_h
- (problem_size.S - 1) * kInterleaved * problem_size.dilation_w
) * element_size_bits / 8;
// next C
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices / kInterleaved * int64_t(layout.stride()[1])
- conv_sign * int64_t(problem_size.R - 1) * layout.stride()[0] * problem_size.dilation_h
- conv_sign * int64_t(problem_size.S - 1) * kInterleaved * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Layout_ = layout::TensorNHWC >
struct Conv2dFpropFilterIteratorOptimizedParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
template<>
struct Conv2dFpropFilterIteratorOptimizedParams<layout::TensorNHWC>
{
using Layout = layout::TensorNHWC;
Layout layout;
int RS;
int filter_c_delta;
int64_t inc_next_k; // offset in units of bytes to next K position
int64_t inc_next_rs; // offset in units of bytes to next RS position
int64_t inc_next_c; // offset in units of bytes to next C position
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFpropFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dFpropFilterIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout) {
TRACE_CONV_INITIALIZERS("conv2d_fprop", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
RS = problem_size.R * problem_size.S;
inc_next_k = (int64_t(layout.stride()[2]) * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_rs =
( int64_t(layout.stride()[0])
- int64_t(layout.stride()[2]) * (threadmap_iterations.strided() - 1) * threadmap_delta.strided()
) * element_size_bits / 8;
inc_next_c =
(
threadblock_shape.row() * problem_size.split_k_slices
- int64_t(RS - 1) * layout.stride()[0]
- int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
) * element_size_bits / 8;
filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
#if ENABLE_CONV2D_PARAMS_PRINT
/// Prints internal state.
CUTLASS_HOST_DEVICE
void print() {
auto stride = layout.stride();
printf(
"Conv2dFpropFilterIteratorOptimizedParams:\n"
" layout[%d, %d, %d]\n"
" RS(%d), filter_c_delta(%d), inc_next(k: %ld, rs: %ld, c: %ld)\n",
stride[0], stride[1], stride[2],
RS,
filter_c_delta,
inc_next_k, inc_next_rs, inc_next_c
);
}
#endif
};
template<int Interleaved_>
struct Conv2dFpropFilterIteratorOptimizedParams<layout::TensorCxRSKx<Interleaved_>>
{
static int const kInterleaved = Interleaved_;
using Layout = layout::TensorCxRSKx<kInterleaved>;
Layout layout;
int RS;
int filter_c_delta;
int64_t inc_next_k; // offset in units of bytes to next K position
int64_t inc_next_rs; // offset in units of bytes to next RS position
int64_t inc_next_c; // offset in units of bytes to next C position
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dFpropFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dFpropFilterIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout) {
TRACE_CONV_INITIALIZERS("conv2d_fprop", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
RS = problem_size.R * problem_size.S;
inc_next_k = (kInterleaved * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_rs =
( int64_t(layout.stride()[0])
- kInterleaved * (threadmap_iterations.strided() - 1) * threadmap_delta.strided()
) * element_size_bits / 8;
inc_next_c =
(
threadblock_shape.row() * problem_size.split_k_slices / kInterleaved * int64_t(layout.stride()[2])
- int64_t(RS - 1) * layout.stride()[0]
- int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * kInterleaved
) * element_size_bits / 8;
filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Dgrad Optimized Dy params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv2d DGRAD OutputGradient (dy) iterator
struct Conv2dDgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
int64_t inc_next[3]; // {next S, next R, next K}
int filter_k_delta; // number of logical elements to add to filter_k_
int HW; // product of H*W
FastDivmod hw_divmod;
FastDivmod w_divmod;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
HW(problem_size.H *problem_size.W),
hw_divmod(HW),
w_divmod(problem_size.W) {
TRACE_CONV_INITIALIZERS("conv2d_dgrad", "output_gradient",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
int conv_sign = (problem_size.mode == Mode::kConvolution ? 1 : -1);
// next S
inc_next[0] = conv_sign * (
(int64_t)layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
(int64_t)layout.stride()[1] * problem_size.dilation_h
- (problem_size.S - 1) * (int64_t)layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next K
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices
- conv_sign * (problem_size.R - 1) * (int64_t)layout.stride()[1] * problem_size.dilation_h
- conv_sign * (problem_size.S - 1) * (int64_t)layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Strided Dgrad Optimized Dy params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Conv2dStridedDgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
int64_t inc_next[3]; // {next S, next R, next K}
int filter_k_delta; // number of logical elements to add to filter_k_
int tiled_rows_per_filter;
int conv_sign;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dStridedDgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dStridedDgradOutputGradientIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape
): layout(layout) {
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_shape.row());
tiled_rows_per_filter = tile_m_per_filter * threadblock_shape.row();
conv_sign = (problem_size.mode == Mode::kConvolution ? 1 : -1);
// next S
inc_next[0] = conv_sign * (
(int64_t)layout.stride()[0] * problem_size.dilation_w
) * element_size_bits / 8;
// next R
inc_next[1] = conv_sign * (
(int64_t)layout.stride()[1] * problem_size.dilation_h
) * element_size_bits / 8;
// next K
inc_next[2] = (
threadblock_shape.column() * problem_size.split_k_slices
) * element_size_bits / 8;
// logical offset added to internal channel counter - units are elements, not bytes
filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
// Dgrad Optimized w params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Conv2dDgradFilterIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
int RS;
int filter_k_delta;
int64_t inc_next_strided; // offset in units of bytes to next K coordinate within tile
int64_t inc_next_rs; // offset in units of bytes to next RS position
int64_t inc_next_k; // offset in units of bytes to next K position in subsequent tile
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dDgradFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dDgradFilterIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout), RS(problem_size.R * problem_size.S) {
TRACE_CONV_INITIALIZERS("conv2d_dgrad", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
inc_next_strided = ((int64_t)layout.stride()[2] * threadmap_delta.strided() * element_size_bits) / 8;
inc_next_rs =
( (int64_t)layout.stride()[0]
- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
) * element_size_bits / 8;
inc_next_k =
(
threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[2]
- (problem_size.R * problem_size.S - 1) * (int64_t)layout.stride()[0]
- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
) * element_size_bits / 8;
filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
// StridedDgrad Optimized w params (layout::TensorNHWC)
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Conv2dStridedDgradFilterIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
int RS;
int filter_k_delta;
int64_t inc_next_strided; // offset in units of bytes to next K coordinate within tile
int64_t inc_next[3]; // {next S, next R, next K}
int64_t reset_bytes; // offset in units of bytes to move back the pointer
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dStridedDgradFilterIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dStridedDgradFilterIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout), RS(problem_size.R * problem_size.S) {
TRACE_CONV_INITIALIZERS("conv2d_dgrad", "filter",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
inc_next_strided = (layout.stride()[2] * threadmap_delta.strided() * element_size_bits) / 8;
// next S
inc_next[0] =
( (int64_t)layout.stride()[0] * problem_size.stride_w
//- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
) * element_size_bits / 8;
// next R
inc_next[1] =
( (int64_t)layout.stride()[1] * problem_size.stride_h
//- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
) * element_size_bits / 8;
// next K
inc_next[2] =
(
threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[2]
//- (problem_size.R * problem_size.S - 1) * layout.stride()[0]
//- (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[2]
) * element_size_bits / 8;
// offset in units of bytes to move the pointer in backward direction
reset_bytes = (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[2]
* element_size_bits / 8;
filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters object for Conv2d WGRAD Output Gradient (dy) iterator
struct Conv2dWgradOutputGradientIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
  int NPQ;                    // precomputed product of N*P*Q for clearing predicates
FastDivmod pq_divmod;
FastDivmod q_divmod;
int64_t offset_next_strided; // offset in units of bytes to next npq coordinate within tile
int64_t offset_next_contiguous; // offset in units of bytes to next k coordinate within tile
int64_t inc_next_npq; // offset in units of bytes to next npq position in subsequent tile
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dWgradOutputGradientIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
layout(layout),
NPQ(problem_size.N * problem_size.P * problem_size.Q),
pq_divmod(problem_size.P * problem_size.Q),
q_divmod(problem_size.Q) {
TRACE_CONV_INITIALIZERS("conv2d_wgrad", "output_gradient",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
    // Incremental offsets in units of bytes: (number of elements) * sizeof_bits<Element>::value / 8
offset_next_strided = (threadmap_delta.strided() * (int64_t)layout.stride()[0])
* element_size_bits / 8;
offset_next_contiguous = (threadmap_delta.contiguous())
* element_size_bits / 8;
inc_next_npq = (threadblock_shape.column() * problem_size.split_k_slices * (int64_t)layout.stride()[0])
* element_size_bits / 8;
}
};
struct Conv2dWgradActivationIteratorOptimizedParams {
using Layout = layout::TensorNHWC;
Layout layout;
FastDivmod sc_divmod;
FastDivmod pq_divmod;
FastDivmod q_divmod;
FastDivmod c_divmod;
FastDivmod s_divmod;
int small_channel_conv_s_offset;
//
// Methods
//
CUTLASS_HOST_DEVICE
Conv2dWgradActivationIteratorOptimizedParams() { }
CUTLASS_HOST_DEVICE
Conv2dWgradActivationIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
layout(layout),
sc_divmod(problem_size.S * problem_size.C),
pq_divmod(problem_size.P * problem_size.Q),
q_divmod(problem_size.Q),
c_divmod(problem_size.C),
s_divmod(problem_size.S * problem_size.dilation_w),
small_channel_conv_s_offset((problem_size.S - 1) * problem_size.dilation_w - problem_size.pad_w) {
}
CUTLASS_HOST_DEVICE
Conv2dWgradActivationIteratorOptimizedParams(
Conv2dProblemSize const &problem_size,
Layout const &layout,
int element_size_bits, ///< size of each element in bits
MatrixCoord threadblock_shape,
int thread_count,
int access_size,
layout::PitchLinearCoord threadmap_iterations,
layout::PitchLinearCoord threadmap_delta
):
Conv2dWgradActivationIteratorOptimizedParams(
problem_size,
layout
) {
TRACE_CONV_INITIALIZERS("conv2d_wgrad", "activation",
element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta);
}
};
struct PredicatedScaleBiasVectorAccessIteratorParams {
public:
/// Default ctor
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIteratorParams() { }
  /// Ctor from problem size and pitch-linear layout (arguments unused; no precomputed state is needed)
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIteratorParams(
Conv2dProblemSize const &problem_size,
layout::PitchLinear const &layout) {}
  /// Ctor from problem size and row-major layout (arguments unused; no precomputed state is needed)
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIteratorParams(
Conv2dProblemSize const &problem_size,
layout::RowMajor const &layout) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 30,197 | C | 32.778523 | 123 | 0.619863 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradFilterTileAccessIteratorOptimized;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorOptimized strided dgrad specialization supports
// problem sizes with arbitrary convolution strides {stride_h, stride_w}
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Parameters structure
//
struct Params : Conv2dStridedDgradFilterIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dStridedDgradFilterIteratorOptimizedParams const &base):
Conv2dStridedDgradFilterIteratorOptimizedParams(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dStridedDgradFilterIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) { }
};
private:
Conv2dStridedDgradFilterIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
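  // Hedged illustration (hypothetical thread map, not asserted anywhere here): with
  // Iterations = {kContiguous = 2, kStrided = 4}, access (c = 1, s = 2) of every
  // vector slot v maps to bit (1 + 2 * 2) = 5 of predicates_[v].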
int filter_k_;
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int64_t reset_bytes_s_;
int64_t reset_bytes_r_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided *
ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized(
Conv2dStridedDgradFilterIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.strided();
Index column = threadblock_offset.column() + thread_coord.contiguous();
reset_bytes_s_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0];
reset_bytes_r_ = reset_bytes_s_ +
(problem_size_.num_gemm_k_filter_r(start_r_) - 1) * params_.inc_next[1];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided;
int filter_c = column + c * ThreadMap::Delta::kContiguous;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
uint32_t pred = ((filter_k < problem_size_.K && (filter_c + v * AccessType::kElements) < problem_size_.C) ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_[v] |= (pred << pred_idx);
}
}
}
TensorCoord coord{filter_k_, filter_r_, filter_s_, column};
pointer_ += params_.layout(coord) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_DEVICE
void advance() {
int next_idx = 0;
LongIndex reset_bytes = params_.reset_bytes;
// Move filter_s by stride_w
filter_s_ += problem_size_.stride_w;
if (filter_s_ >= problem_size_.S) {
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r by stride_h
filter_r_ += problem_size_.stride_h;
#if 0
bool check = (filter_r_ < problem_size_.R);
filter_r_ = check ? filter_r_ : start_r_;
next_idx = check ? 1 : 2;
reset_bytes += (check ? reset_bytes_s_ : reset_bytes_r_);
#else
asm volatile(
"{\n\t"
" .reg .pred %%p;\n\t"
" .reg .s64 t1;\n\t"
" setp.lt.s32 %%p, %3, %4;\n\t"
" selp.s32 %0, %3, %5, %%p;\n\t"
" selp.s32 %1, 1, 2, %%p;\n\t"
" selp.s64 t1, %6, %7, %%p;\n\t"
" add.s64 %2, %8, t1;\n\t"
"}\n"
: "=r"(filter_r_), "=r"(next_idx), "=l"(reset_bytes)
: "r"(filter_r_), "r"(problem_size_.R), "r"(start_r_),
"l"(reset_bytes_s_), "l"(reset_bytes_r_), "l"(reset_bytes));
#endif
}
    // Advance the pointer by the increment for the next filter position, minus the accumulated reset bytes
pointer_ += (params_.inc_next[next_idx] - reset_bytes);
if (next_idx == 2) {
filter_k_ += params_.filter_k_delta;
}
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
predicates_[v] = (predicates_[v] & (~kClearMask));
}
}
}
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_[iteration_vector_] & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_strided;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradFilterTileAccessIteratorOptimized unity strided dgrad is more performant for dgrad
// on problem sizes with stride = {1x1}
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradFilterTileAccessIteratorOptimized <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Parameters structure
//
struct Params : Conv2dDgradFilterIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Conv2dDgradFilterIteratorOptimizedParams const &base):
Conv2dDgradFilterIteratorOptimizedParams(base) { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
):
Conv2dDgradFilterIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
) { }
};
private:
Conv2dDgradFilterIteratorOptimizedParams const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
uint32_t predicates_[kAccessesPerVector];
int filter_rs_;
int filter_k_;
//
// Assertions
//
// We map predicates into bits packed in this uint32_t container
static_assert(ThreadMap::Iterations::kStrided *
ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8,
"Currently, the number of loads per iteration is limited by the size of the predicates container.");
public:
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized(
Conv2dDgradFilterIteratorOptimizedParams const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
predicates_{0},
filter_rs_(0),
filter_k_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.row() + thread_coord.strided();
Index column = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided;
int filter_c = column + c * ThreadMap::Delta::kContiguous;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
uint32_t pred = ((filter_k < problem_size_.K && (filter_c + v * AccessType::kElements) < problem_size_.C) ? 1u : 0);
int pred_idx = c + s * ThreadMap::Iterations::kContiguous;
predicates_[v] |= (pred << pred_idx);
}
}
}
pointer_ += (
filter_k_ * params.layout.stride()[2] + column
) * sizeof_bits<Element>::value / 8;
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
LongIndex next = params_.inc_next_rs;
// moves to the next tile
++filter_rs_;
if (filter_rs_ == params_.RS) {
filter_rs_ = 0;
next = params_.inc_next_k;
filter_k_ += params_.filter_k_delta;
}
// Clear predicates if needed
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) {
uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous);
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
predicates_[v] = (predicates_[v] & (~kClearMask));
}
}
}
pointer_ += next;
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() {
LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous;
return (predicates_[iteration_vector_] & (1u << pred_idx));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + iteration_vector_;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradFilterTileAccessIteratorOptimized &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
// Move to the next K coordinate within the tile
pointer_ += params_.inc_next_strided;
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 19,735 | C | 30.832258 | 126 | 0.64829 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dDgradFilterTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or larger.");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
  // For a fixed filter position (t, r, s), offset_k_[] and offset_c_[] hold this thread's offsets in the strided and contiguous dimensions
int filter_t_;
int filter_r_;
int filter_s_;
int offset_k_[ThreadMap::Iterations::kStrided];
int offset_c_[ThreadMap::Iterations::kContiguous];
public:
CUTLASS_HOST_DEVICE
Conv3dDgradFilterTileAccessIteratorAnalytic(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
offset_c_[c] = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] =
threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
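    // Traversal order (descriptive): filter_s_ advances fastest, then filter_r_,
    // then filter_t_; once (t, r, s) wraps back to (0, 0, 0), the strided K offsets
    // advance by Shape::kRow * split_k_slices.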
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the filter tensor w that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int c = offset_c_[iteration_contiguous_];
int k = offset_k_[iteration_strided_];
return TensorCoord(k, filter_t_, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor w
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dDgradFilterTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
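// The divisor 128 / sizeof_bits<Element> is the number of elements in 128 bits
// of data, so C must be a multiple of that count; for example, with 16-bit
// elements the check requires C to be divisible by 8.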
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,450 | C | 30.416357 | 112 | 0.650296 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropFilterTileAccessIteratorFixedChannels {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kFixedChannels;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kFilterPositionsPerTile = Shape::kRow / AccessType::kElements;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static bool const kUseFastDivmodPrologue = true;
static bool const kUseFastDivmodMainloop = true;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dFewChannelsParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int rs_index_;
int offset_k_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorFixedChannels(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
rs_index_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
rs_index_ = (threadblock_offset.row() + thread_coord.contiguous()) / AccessType::kElements;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * 8 / sizeof_bits<Element>::value;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
rs_index_ += kFilterPositionsPerTile * problem_size_.split_k_slices;
}
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int rs_index = rs_index_ + iteration_vector_;
int r = 0;
int s = 0;
if (kUseFastDivmodMainloop) {
r = params_.divmod_S.divmod(s, rs_index);
}
else {
s = (rs_index % problem_size_.S);
r = (rs_index / problem_size_.S);
}
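// Example of the decomposition above: with problem_size_.S == 3, rs_index == 7
// maps to (r, s) == (2, 1). The channel coordinate returned below is always 0,
// since a single access spans all C channels (can_implement() requires
// C == AccessType::kElements).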
int k = offset_k_[iteration_strided_];
return TensorCoord(k, r, s, 0);
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.K && coord.h() >= 0 && coord.h() < problem_size_.R;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
int32_t offset =
coord.n() * params_.stride_n +
coord.h() * params_.stride_h +
coord.w() * params_.stride_w + coord.c();
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorFixedChannels &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C != AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) {
if (problem_size.K % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) {
if (problem_size.K % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,018 | C | 31.677536 | 107 | 0.664892 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>,
conv::GroupMode GroupMode_ = conv::GroupMode::kNone
>
class Conv2dFpropFilterTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static conv::GroupMode const kGroupMode = GroupMode_;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dAnalyticParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_r_;
int filter_s_;
int filter_c_;
int filter_c_init_;
int crs_cnt_;
int crs_per_group_;
int group_idx_offset_c_;
int channels_per_group_;
int offset_k_[ThreadMap::Iterations::kStrided];
int group_idx_offset_k_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
crs_cnt_(0),
group_idx_offset_c_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
if (kGroupMode != conv::GroupMode::kNone) {
filter_c_init_ = filter_c_;
if (kGroupMode == conv::GroupMode::kDepthwise){
channels_per_group_ = 1;
crs_per_group_ = problem_size_.S * problem_size_.R;
} else {
channels_per_group_ = problem_size_.C / problem_size_.groups;
crs_per_group_ = problem_size_.S * problem_size_.R * ((channels_per_group_ + Shape::kRow - 1) / Shape::kRow);
}
}
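// For grouped convolution, crs_per_group_ is the number of advance() steps
// needed to cover one group: all R*S filter positions times the number of
// Shape::kRow-wide channel tiles per group. As an illustration (hypothetical
// sizes, split_k_slices == 1): R = S = 3, C = 128, groups = 2 and
// Shape::kRow = 32 give channels_per_group_ = 64 and
// crs_per_group_ = 3 * 3 * 2 = 18.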
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
if (kGroupMode != conv::GroupMode::kNone && kGroupMode != conv::GroupMode::kDepthwise) {
group_idx_offset_k_[s] = (thread_coord.strided() + s * ThreadMap::Delta::kStrided) / (problem_size_.K / problem_size_.groups);
}
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * 8 / sizeof_bits<Element>::value;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
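// crs_cnt_ counts advance() steps within the current group. After a full R*S
// sweep, either the channel offset steps forward by
// Shape::kRow * split_k_slices (same as the non-grouped path), or, once
// crs_cnt_ reaches crs_per_group_, filter_c_ is reset to its initial value and
// group_idx_offset_c_ moves on to the next group (for non-depthwise groups).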
if (kGroupMode != conv::GroupMode::kNone) {
++crs_cnt_;
}
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
if (kGroupMode == conv::GroupMode::kNone) {
filter_c_ += Shape::kRow * problem_size_.split_k_slices;
} else {
if (crs_cnt_ == crs_per_group_) {
crs_cnt_ = 0;
filter_c_ = filter_c_init_;
if (kGroupMode != conv::GroupMode::kDepthwise) {
// moves to next group
++group_idx_offset_c_;
}
} else {
filter_c_ += Shape::kRow * problem_size_.split_k_slices;
}
}
}
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
int c = filter_c_ + iteration_vector_ * AccessType::kElements;
return TensorCoord(k, filter_r_, filter_s_, c);
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
if (kGroupMode == conv::GroupMode::kNone) {
return coord.n() < problem_size_.K && coord.c() < problem_size_.C;
} else if (kGroupMode == conv::GroupMode::kDepthwise) {
return coord.n() < problem_size_.K && coord.c() < 1; // channels_per_group_ is always equal to ONE.
} else {
return coord.n() < problem_size_.K && coord.c() < channels_per_group_ &&
group_idx_offset_c_ == group_idx_offset_k_[iteration_strided_];
}
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropFilterTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) {
if (problem_size.K % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) {
if (problem_size.K % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,627 | C | 32.2125 | 134 | 0.645808 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradActivationTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
Conv3dProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
// Filter position (t,r,s,c) in the contiguous dimension stays constant for each gemm_iteration_k
int filter_t_[ThreadMap::Iterations::kContiguous];
int filter_r_[ThreadMap::Iterations::kContiguous];
int filter_s_[ThreadMap::Iterations::kContiguous];
int filter_c_[ThreadMap::Iterations::kContiguous];
int offset_nzpq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dWgradActivationTileAccessIteratorAnalytic(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize t,r,s,c filter position for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for(int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int trsc_offset = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
filter_t_[c] = trsc_offset / (problem_size_.R * problem_size_.S * problem_size_.C);
int residual = trsc_offset % (problem_size_.R * problem_size_.S * problem_size_.C);
filter_r_[c] = residual / (problem_size_.S * problem_size_.C);
residual = residual % (problem_size_.S * problem_size_.C);
filter_s_[c] = residual / problem_size_.C;
filter_c_[c] = residual % problem_size_.C;
}
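// Worked example of the decomposition above (hypothetical sizes): with R = 3,
// S = 3 and C = 8, a trsc_offset of 100 gives t = 100 / 72 = 1, residual = 28,
// r = 28 / 24 = 1, residual = 4, s = 4 / 8 = 0 and c = 4.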
// initialize n, z, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] = threadblock_offset.row() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_nzpq_) in GEMM-B by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the activation tensor x that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int t = filter_t_[iteration_contiguous_];
int r = filter_r_[iteration_contiguous_];
int s = filter_s_[iteration_contiguous_];
if (problem_size_.mode == Mode::kConvolution) {
t = (problem_size_.T - 1 - t);
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int n = offset_nzpq_[iteration_strided_] / (problem_size_.Z * problem_size_.P * problem_size_.Q);
int residual = offset_nzpq_[iteration_strided_] % (problem_size_.Z * problem_size_.P * problem_size_.Q);
int z = residual / (problem_size_.P * problem_size_.Q);
residual = residual % (problem_size_.P * problem_size_.Q);
int p = residual / problem_size_.Q;
int q = residual % problem_size_.Q;
int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
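// The mapping above is the usual convolution index arithmetic, e.g.
// h = p * stride_h - pad_h + r * dilation_h. With stride_h = 1, pad_h = 1 and
// dilation_h = 1 (hypothetical values), p = 0 and r = 0 give h = -1, which
// valid() below rejects.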
return TensorCoord(n, d, h, w, filter_c_[iteration_contiguous_]);
}
/// Returns true if the current coordinate is within the activation tensor x
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.d() >= 0 && coord.d() < problem_size_.D &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W &&
coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradActivationTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,971 | C | 33.386207 | 108 | 0.649283 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_
>
class Conv3dFpropActivationTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
using Mask = uint64_t;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dFpropActivationIteratorOptimizedParams<Layout>;
private:
Conv3dFpropActivationIteratorOptimizedParams<Layout> const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
// One pointer per access
char const *pointer_[ThreadMap::Iterations::kStrided];
// current filter position (t, r, s)
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
// mask for t, r, and s
Index masks_[ThreadMap::Iterations::kStrided][3];
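// Each entry packs one predicate bit per filter position: masks_[s][0] covers
// t (and also folds in the batch bound n < N), masks_[s][1] covers r and
// masks_[s][2] covers s. A bit is set when the corresponding d/h/w coordinate
// falls inside the input tensor, and valid() tests the bit selected by the
// current (filter_t_, filter_r_, filter_s_). This packing is why
// can_implement() limits T, R and S to at most 32.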
public:
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorOptimized(
Conv3dFpropActivationIteratorOptimizedParams<Layout> const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
) :
params_(params),
problem_size_(problem_size),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.column() + thread_coord.contiguous();
int offset_n[ThreadMap::Iterations::kStrided];
int offset_z[ThreadMap::Iterations::kStrided];
int offset_p[ThreadMap::Iterations::kStrided];
int offset_q[ThreadMap::Iterations::kStrided];
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] = reinterpret_cast<char const *>(ptr);
int offset_nzpq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
// The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// offset_n[s] = offset_nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q);
// int residual = offset_nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q);
//
// offset_z[s] = residual / (problem_size_.P * problem_size_.Q);
// residual = residual % (problem_size_.P * problem_size_.Q);
//
// offset_p[s] = residual / problem_size_.Q;
// offset_q[s] = residual % problem_size_.Q;
//
int residual;
// input: (nzpq offset) output: (n offset and residual (zpq offset))
params.zpq_divmod(offset_n[s], residual, offset_nzpq);
// input: (zpq offset) output: (z offset and residual (pq offset))
params.pq_divmod(offset_z[s], residual, residual);
// input: (pq offset) output: (p offset and residual (q offset))
params.q_divmod(offset_p[s], offset_q[s], residual);
TensorCoord coord = at_(offset_n[s], offset_z[s], offset_p[s], offset_q[s], 0, 0, 0);
pointer_[s] += params_.layout(coord) * sizeof_bits<Element>::value / 8;
}
clear_mask();
// mask predicates for filter position T
CUTLASS_PRAGMA_NO_UNROLL
for (int t = 0; t < problem_size_.T; ++t) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int t_ = t;
if (problem_size_.mode == Mode::kConvolution) {
t_ = problem_size_.T - 1 - t;
}
int d = offset_z[s_idx] * problem_size_.stride_d - problem_size_.pad_d + t_ * problem_size_.dilation_d;
bool pred = (offset_n[s_idx] < problem_size_.N && d >= 0 && d < problem_size_.D);
masks_[s_idx][0] |= (pred << t);
}
}
// mask predicates for filter position R
CUTLASS_PRAGMA_NO_UNROLL
for (int r = 0; r < problem_size_.R; ++r) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int r_ = r;
if (problem_size_.mode == Mode::kConvolution) {
r_ = problem_size_.R - 1 - r;
}
int h = offset_p[s_idx] * problem_size_.stride_h - problem_size_.pad_h + r_ * problem_size_.dilation_h;
bool pred = (h >= 0 && h < problem_size_.H);
masks_[s_idx][1] |= (pred << r);
}
}
// mask predicates for filter position S
CUTLASS_PRAGMA_NO_UNROLL
for (int s = 0; s < problem_size_.S; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int s_idx = 0; s_idx < ThreadMap::Iterations::kStrided; ++s_idx) {
int s_ = s;
if (problem_size_.mode == Mode::kConvolution) {
s_ = problem_size_.S - 1 - s;
}
int w = offset_q[s_idx] * problem_size_.stride_w - problem_size_.pad_w + s_ * problem_size_.dilation_w;
bool pred = (w >= 0 && w < problem_size_.W);
masks_[s_idx][2] |= (pred << s);
}
}
if (filter_c_ >= problem_size.C) {
clear_mask();
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided});
}
private:
/// Returns the coordinate in the activations tensor X that corresponds to
/// output nzpq and filter position t, r, s
CUTLASS_HOST_DEVICE
TensorCoord at_(int n, int z, int p, int q, int t, int r, int s) const {
if (problem_size_.mode == Mode::kConvolution) {
t = problem_size_.T - 1 - t;
r = problem_size_.R - 1 - r;
s = problem_size_.S - 1 - s;
}
int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d;
int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h;
int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w;
return TensorCoord(n, d, h, w, filter_c_);
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_byte_offset_(LongIndex byte_offset) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
pointer_[s] += byte_offset;
}
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask_(bool clear) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
// We are using inline PTX assembly here to avoid a CUDA C++ compilation
// artifact in which control flow instructions are generated. Instead, our
// intent is to predicate the mov instructions.
#if defined(__CUDA_ARCH__)
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][0])
:
"r"((int)clear),
"r"(masks_[s][0])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][1])
:
"r"((int)clear),
"r"(masks_[s][1])
);
asm volatile(
"{\n"
" .reg .pred p;\n"
" .reg .u32 m;"
" mov.u32 m, %2;"
" setp.ne.b32 p, %1, 0;\n"
" @p mov.u32 m, 0;\n"
" mov.u32 %0, m;\n"
"}\n"
:
"=r"(masks_[s][2])
:
"r"((int)clear),
"r"(masks_[s][2])
);
#else
if (clear) {
masks_[s][0] = 0;
masks_[s][1] = 0;
masks_[s][2] = 0;
}
#endif
}
}
public:
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
add_byte_offset_(pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_HOST_DEVICE
void advance() {
int next_idx = 0;
// moves to the next tile
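// next_idx selects which precomputed byte increment in params_.inc_next to
// apply: 0 advances to the next s, 1 wraps s and moves to the next r, 2 wraps
// r and moves to the next t, and 3 wraps the whole T*R*S sweep and moves to
// the next channel tile, at which point filter_c_ is bumped by filter_c_delta
// and the masks are cleared once filter_c_ reaches or passes problem_size_.C.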
++filter_s_;
if (filter_s_ == problem_size_.S) {
filter_s_ = 0;
++filter_r_;
next_idx = 1;
if (filter_r_ == problem_size_.R) {
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
next_idx = 2;
}
else {
filter_t_ = 0;
next_idx = 3;
}
}
}
add_byte_offset_(params_.inc_next[next_idx]);
if (next_idx == 3) {
filter_c_ += params_.filter_c_delta;
}
clear_mask_(filter_c_ >= problem_size_.C);
}
/// Clears the predicates
CUTLASS_HOST_DEVICE
void clear_mask() {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
masks_[s][0] = Mask(0);
masks_[s][1] = Mask(0);
masks_[s][2] = Mask(0);
}
}
CUTLASS_HOST_DEVICE
bool valid() {
return
(masks_[iteration_strided_][0] & (Index(1) << filter_t_)) &&
(masks_[iteration_strided_][1] & (Index(1) << filter_r_)) &&
(masks_[iteration_strided_][2] & (Index(1) << filter_s_));
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
return reinterpret_cast<AccessType const *>(pointer_[iteration_strided_]);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropActivationTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
// Conv3dFpropActivationTileAccessIteratorOptimized has a constraint on filter positions
// due to the number of mask bits.
if (problem_size.T > 32 || problem_size.R > 32 || problem_size.S > 32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,132 | C | 30.592902 | 114 | 0.59318 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped fused activation's scale+bias+relu and
Implicit GEMM Convolution kernel.
The original implicit GEMM stores out-of-bound data as zeroes in shared
memory: zeroes go into the tensor cores and zeroes come out, so the result
is unchanged. When scale+bias+relu is fused into the mainloop this no
longer holds, because
0 x scale + bias = bias
which is not always 0. So, instead of storing zeroes, this fused kernel
stores out-of-bound data as a special NaN (0x7eff). When applying
scale+bias+relu, the code behaves like
if (data == 0x7eff)
data = 0;
else
data = scale+bias+relu(data, scale, bias);
The biggest difference compared with the fused Fprop scale+bias+relu kernel
is that scale and bias are loop invariant in Wgrad, so they only need to be
loaded once before the mainloop.
See include/cutlass/conv/warp/scale_bias_relu_transform.h for the
elementwise computation. See include/cutlass/arch/memory_sm80.h for nan fill.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/scale_bias_tile_iterator.h"
#include "cutlass/conv/warp/scale_bias_relu_transform.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Element type of scale and bias vectors
typename ElementScaleBias_,
/// Layout of scale and bias vectors
typename LayoutScaleBias_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class MmaWgradFusionBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Element type of scale and bias vectors
using ElementScaleBias = ElementScaleBias_;
/// Layout of scale and bias vectors
using LayoutScaleBias = LayoutScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
/// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB =
MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator::LayoutA LayoutA() {
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaWgradFusionBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Iterates over the scale and bias vectors in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
//  MaskedTileIterator)
typename IteratorScaleBias_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class ImplicitGemmWgradFusionMultistage
: public MmaWgradFusionBase<Shape_, typename IteratorScaleBias_::Element,
typename IteratorScaleBias_::Layout, Policy_, Stages> {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Iterates over tiles of the scale and bias vectors in global memory
using IteratorScaleBias = IteratorScaleBias_;
///< Policy describing tuning details
using Policy = Policy_;
///< Base class
using Base = MmaWgradFusionBase<Shape_, typename IteratorScaleBias::Element,
typename IteratorScaleBias::Layout, Policy_, Stages>;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using ElementC = typename Policy::Operator::ElementC;
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
/// Number of cp.async instructions to load on group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
/// Number of cp.async instructions to load on group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
static int const kBBufferSize =
((sizeof(typename Operator::ElementC) == 4) &&
((platform::is_same<typename Operator::Policy::Operator::ElementA,
typename Operator::ElementA>::value &&
platform::is_same<typename Operator::Policy::Operator::ElementB,
typename Operator::ElementB>::value)) &&
(Operator::Shape::kM >= 64 && Operator::Shape::kN >= 64))
? 1
: 2;
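// kBBufferSize is 1 only when the accumulator element is 4 bytes, the warp
// operand element types already match the underlying mma instruction types
// (which suggests no A/B fragment transform is needed) and the warp tile is
// at least 64x64; otherwise 2. The name suggests it sizes the warp-level
// B-fragment buffering used by the mainloop.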
};
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpLoadedFragmentScaleBias = typename IteratorScaleBias::Fragment;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
int warp_idx_m_;
int warp_idx_n_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
ImplicitGemmWgradFusionMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM;
warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_});
}
CUTLASS_DEVICE
void copy_tiles_and_advance(IteratorA &iterator_A,
IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
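// Operand A tiles are staged with cp_async_zfill, so out-of-bound elements
// arrive in shared memory as zeroes, while operand B tiles use cp_async_nan,
// so out-of-bound activations arrive as the special NaN sentinel described in
// the file comment and can later be mapped back to zero by the fused
// scale+bias+relu transform.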
iterator_A.set_iteration_index(group_start_A);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr, iterator_A.get(), iterator_A.valid());
++iterator_A;
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / 8;
        // Uses NaN fill for out-of-bound data
cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpB>(
dst_ptr, iterator_B.get(), iterator_B.valid());
++iterator_B;
++this->smem_iterator_B_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< iterator over scale and bias vectors in global memory
IteratorScaleBias iterator_B_scale_bias,
///< initial value of accumulator
FragmentC const &src_accum,
///< number of iterations per channel
int gemm_k_iterations_per_channel = 0,
///< Imaginary strides used for planar-complex only - ignored here
int64_t imag_stride_A = 0,
int64_t imag_stride_B = 0) {
//
// Prologue
//
WarpLoadedFragmentScaleBias warp_loaded_frag_B_scale_bias;
iterator_B_scale_bias.add_tile_offset({0, warp_idx_n_});
iterator_B_scale_bias.load(warp_loaded_frag_B_scale_bias);
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr, iterator_A.get(), iterator_A.valid());
++iterator_A;
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess / 8;
        // Uses NaN fill for out-of-bound data
cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpB>(
dst_ptr, iterator_B.get(), iterator_B.valid());
++iterator_B;
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[Detail::kBBufferSize];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpTransformedFragmentA warp_transformed_frag_A[Detail::kBBufferSize];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
cutlass::conv::warp::WgradScaleBiasReluTransform<WarpTransformedFragmentB,
WarpLoadedFragmentScaleBias>
elementwise_transform;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A, iterator_B);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
elementwise_transform(warp_transformed_frag_B[0],
warp_loaded_frag_B_scale_bias);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
        // Load warp-level tiles from shared memory, wrapping to the k offset
        // if this is the last group.
if (Detail::kBBufferSize == 2) {
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize]);
++this->warp_tile_iterator_A_;
}
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0) {
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % Detail::kBBufferSize],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % Detail::kBBufferSize],
warp_loaded_frag_B[warp_mma_k % 2]);
elementwise_transform(warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_B_scale_bias);
}
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % Detail::kBBufferSize],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
if (Detail::kBBufferSize == 1) {
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
++this->warp_tile_iterator_A_;
}
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
elementwise_transform(
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_B_scale_bias);
}
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
} else {
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(iterator_A, iterator_B,
group_start_iteration_A,
group_start_iteration_B);
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages of cp.async have committed
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
}
}
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
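// Illustrative sketch (not part of CUTLASS): the circular-buffer stage-index
// bookkeeping used by the mainloop above, reduced to plain C++. Names are
// local to this example; kStages plays the role of Base::kStages. The real
// mainloop additionally rewinds the shared-memory and warp tile iterators by a
// negative tile offset whenever an index wraps back to stage 0.
namespace wgrad_fusion_multistage_example {

// Advance a write or read stage index through a circular buffer of kStages
// shared-memory stages.
inline int advance_stage_index(int stage_idx, int kStages) {
  return (stage_idx == kStages - 1) ? 0 : stage_idx + 1;
}

} // namespace wgrad_fusion_multistage_example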
| 26,320 | C | 35.056164 | 104 | 0.627014 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile)
matrix from memory.
This iterator assumes TensorNHWC or TensorNCxHWx<Interleave> layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename Layout_,
typename ThreadMap_,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dFpropActivationTileAccessIteratorFixedChannels {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kFixedChannels;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kFilterPositionsPerTile = Shape::kColumn / AccessType::kElements;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static bool const kUseFastDivmodPrologue = true;
static bool const kUseFastDivmodMainloop = true;
static int const kStrideH = 0;
static int const kStrideW = 0;
static int const kDilationH = 0;
static int const kDilationW = 0;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dFewChannelsParams<Layout>;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int rs_index_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_p_[ThreadMap::Iterations::kStrided];
int offset_q_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorFixedChannels(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
rs_index_(0) {
//
// This requires problem_size.C == AccessType::kElements
//
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
rs_index_ = (threadblock_offset.column() + thread_coord.contiguous()) / AccessType::kElements;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_npq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
if (kUseFastDivmodPrologue) {
int residual = params_.divmod_Q.divmod(offset_q_[s], offset_npq);
offset_n_[s] = params_.divmod_P.divmod(offset_p_[s], residual);
}
else {
offset_n_[s] = offset_npq / (problem_size_.P * problem_size_.Q);
int residual = offset_npq % (problem_size_.P * problem_size_.Q);
offset_p_[s] = residual / problem_size_.Q;
offset_q_[s] = residual % problem_size_.Q;
}
}
set_iteration_index(0);
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
rs_index_ += kFilterPositionsPerTile * problem_size_.split_k_slices;
}
/// Returns the coordinate in the activations tensor X that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int n = offset_n_[iteration_strided_];
int p = offset_p_[iteration_strided_];
int q = offset_q_[iteration_strided_];
int rs_index = rs_index_ + iteration_vector_;
int r = 0;
int s = 0;
if (kUseFastDivmodMainloop) {
r = params_.divmod_S.divmod(s, rs_index);
}
else {
s = (rs_index % problem_size_.S);
r = (rs_index / problem_size_.S);
}
if (problem_size_.mode == Mode::kConvolution) {
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int stride_h = kStrideH;
if (!kStrideH) {
stride_h = problem_size_.stride_h;
}
int stride_w = kStrideW;
if (!kStrideW) {
stride_w = problem_size_.stride_w;
}
int dilation_h = kDilationH;
if (!kDilationH) {
dilation_h = problem_size_.dilation_h;
}
int dilation_w = kDilationW;
if (!kDilationW) {
dilation_w = problem_size_.dilation_w;
}
int h = p * stride_h - problem_size_.pad_h + r * dilation_h;
int w = q * stride_w - problem_size_.pad_w + s * dilation_w;
return TensorCoord(n, h, w, 0);
}
/// Returns true if the current coordinate is within the activations tensor X
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
int32_t offset =
coord.n() * params_.stride_n +
coord.h() * params_.stride_h +
coord.w() * params_.stride_w + coord.c();
AccessType const *ptr = reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
return ptr;
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dFpropActivationTileAccessIteratorFixedChannels &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.C != AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
if (kDilationH && problem_size.dilation_h != kDilationH) {
return Status::kErrorInvalidProblem;
}
if (kDilationW && problem_size.dilation_w != kDilationW) {
return Status::kErrorInvalidProblem;
}
if (kStrideH && problem_size.stride_h != kStrideH) {
return Status::kErrorInvalidProblem;
}
if (kStrideW && problem_size.stride_w != kStrideW) {
return Status::kErrorInvalidProblem;
}
if (platform::is_same<Layout, layout::TensorNCxHWx<32>>::value) {
if (problem_size.C % 32) {
return Status::kErrorInvalidProblem;
}
}
if (platform::is_same<Layout, layout::TensorNCxHWx<64>>::value) {
if (problem_size.C % 64) {
return Status::kErrorInvalidProblem;
}
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
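// Illustrative sketch (not part of CUTLASS): the slow-path coordinate mapping
// performed by the constructor and at() above, written out in plain C++.
// All names are local to this example.
namespace conv2d_fprop_fixed_channels_example {

struct ActivationCoord { int n, h, w, c; };

// Maps a GEMM row offset (offset_npq) and a filter position (r, s) to the
// activation coordinate the iterator would access, assuming the filter
// position has already been flipped for Mode::kConvolution if necessary.
inline ActivationCoord activation_coord(
    int offset_npq, int P, int Q,
    int r, int s,
    int stride_h, int stride_w,
    int pad_h, int pad_w,
    int dilation_h, int dilation_w) {

  int n = offset_npq / (P * Q);
  int residual = offset_npq % (P * Q);
  int p = residual / Q;
  int q = residual % Q;

  int h = p * stride_h - pad_h + r * dilation_h;
  int w = q * stride_w - pad_w + s * dilation_w;

  // The channel is always 0: with fixed channels, problem_size.C equals
  // AccessType::kElements, so one access covers the entire C dimension.
  return ActivationCoord{n, h, w, 0};
}

} // namespace conv2d_fprop_fixed_channels_example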
| 11,333 | C | 31.016949 | 118 | 0.654107 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_optimized.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (activation tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_
>
class Conv3dWgradActivationTileAccessIteratorOptimized {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
static_assert(sizeof_bits<Element>::value >= 8,
"WGRAD requires elements of size 8b or greater.");
//
// Parameters structure
//
struct Params : Conv3dWgradActivationIteratorOptimizedParams {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() {}
CUTLASS_HOST_DEVICE
Params(Conv3dWgradActivationIteratorOptimizedParams const &base)
: Conv3dWgradActivationIteratorOptimizedParams(base) {}
CUTLASS_HOST_DEVICE
Params(Conv3dProblemSize const &problem_size, Layout const &layout)
: Conv3dWgradActivationIteratorOptimizedParams(
problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn},
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}) {}
};
private:
Params const ¶ms_;
Conv3dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
  // Precomputed effective filter position (t, r, s) in the contiguous dimension stays constant for each gemm_iteration_k
// required for nzpq -> ndhw translation
int precomputed_filter_t_[ThreadMap::Iterations::kContiguous];
int precomputed_filter_r_[ThreadMap::Iterations::kContiguous];
int precomputed_filter_s_[ThreadMap::Iterations::kContiguous];
  // The channel index in the contiguous dimension stays constant for each gemm_iteration_k
int filter_c_[ThreadMap::Iterations::kContiguous];
int offset_nzpq_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dWgradActivationTileAccessIteratorOptimized(
Params const ¶ms,
Conv3dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
// initialize t,r,s,c filter position for every contiguous iteration
CUTLASS_PRAGMA_UNROLL
for(int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int trsc_offset = threadblock_offset.column() + thread_coord.contiguous()
+ c * ThreadMap::Delta::kContiguous;
      // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// filter_t_[c] = trsc_offset / (problem_size_.R * problem_size_.S * problem_size_.C);
// int residual = trsc_offset % (problem_size_.R * problem_size_.S * problem_size_.C);
//
// filter_r_[c] = residual / (problem_size_.S * problem_size_.C);
// residual = residual % (problem_size_.S * problem_size_.C);
//
// filter_s_[c] = residual / problem_size_.C;
// filter_c_[c] = residual % problem_size_.C;
int residual;
fast_divmod(precomputed_filter_t_[c], residual, trsc_offset, params_.RSC, params_.rsc_mul, params_.rsc_shr);
fast_divmod(precomputed_filter_r_[c], residual, residual, params_.SC, params_.sc_mul, params_.sc_shr);
fast_divmod(precomputed_filter_s_[c], filter_c_[c], residual, problem_size_.C, params_.c_mul, params_.c_shr);
int t = precomputed_filter_t_[c];
int r = precomputed_filter_r_[c];
int s = precomputed_filter_s_[c];
if (problem_size_.mode == Mode::kConvolution) {
t = (problem_size_.T - 1 - t);
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
      // Effective t, r, s for every contiguous iteration
precomputed_filter_t_[c] = - problem_size_.pad_d + t * problem_size_.dilation_d;
precomputed_filter_r_[c] = - problem_size_.pad_h + r * problem_size_.dilation_h;
precomputed_filter_s_[c] = - problem_size_.pad_w + s * problem_size_.dilation_w;
}
// initialize n, z, p, q offset for every strided iteration
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] = threadblock_offset.row() + thread_coord.strided()
+ s * ThreadMap::Delta::kStrided;
}
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next GEMM-K offset (offset_nzpq_) in GEMM-B by a CTA-K tile
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_nzpq_[s] += Shape::kRow * problem_size_.split_k_slices;
}
}
/// Returns the coordinate in the activation tensor x that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
    // The subsequent fast_divmod() operations are equivalent to the following logical computation:
//
//
// int n = offset_nzpq_[iteration_strided_] / (problem_size_.Z * problem_size_.P * problem_size_.Q);
// int residual = offset_nzpq_[iteration_strided_] % (problem_size_.Z * problem_size_.P * problem_size_.Q);
//
// int z = residual / (problem_size_.P * problem_size_.Q);
// residual = residual % (problem_size_.P * problem_size_.Q);
//
// int p = residual / problem_size_.Q;
// int q = residual % problem_size_.Q;
int residual, n, z, p, q;
fast_divmod(n, residual, offset_nzpq_[iteration_strided_], params_.ZPQ, params_.zpq_mul, params_.zpq_shr);
fast_divmod(z, residual, residual, params_.PQ, params_.pq_mul, params_.pq_shr);
fast_divmod(p, q, residual, problem_size_.Q, params_.q_mul, params_.q_shr);
int d = z * problem_size_.stride_d + precomputed_filter_t_[iteration_contiguous_];
    int h = p * problem_size_.stride_h + precomputed_filter_r_[iteration_contiguous_];
int w = q * problem_size_.stride_w + precomputed_filter_s_[iteration_contiguous_];
return TensorCoord(n, d, h, w, filter_c_[iteration_contiguous_]);
}
/// Returns true if the current coordinate is within the activation tensor x
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.d() >= 0 && coord.d() < problem_size_.D &&
coord.h() >= 0 && coord.h() < problem_size_.H &&
coord.w() >= 0 && coord.w() < problem_size_.W &&
coord.c() < problem_size_.C;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dWgradActivationTileAccessIteratorOptimized &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv3dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % (128/sizeof_bits<Element>::value)) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
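// Illustrative sketch (not part of CUTLASS): the logical computation that the
// fast_divmod() calls above replace, written out in plain C++. Names are local
// to this example.
namespace conv3d_wgrad_activation_example {

struct ActivationCoord { int n, d, h, w, c; };

// Maps a GEMM-K offset (offset_nzpq) and a filter position (t, r, s) plus
// channel c to the activation coordinate the iterator would access. The filter
// position is assumed to be already flipped for Mode::kConvolution.
inline ActivationCoord activation_coord(
    int offset_nzpq, int Z, int P, int Q,
    int t, int r, int s, int c,
    int stride_d, int stride_h, int stride_w,
    int pad_d, int pad_h, int pad_w,
    int dilation_d, int dilation_h, int dilation_w) {

  int n = offset_nzpq / (Z * P * Q);
  int residual = offset_nzpq % (Z * P * Q);
  int z = residual / (P * Q);
  residual = residual % (P * Q);
  int p = residual / Q;
  int q = residual % Q;

  // Effective filter offsets; the iterator precomputes these per contiguous
  // iteration in its constructor.
  int filter_t = -pad_d + t * dilation_d;
  int filter_r = -pad_h + r * dilation_h;
  int filter_s = -pad_w + s * dilation_w;

  return ActivationCoord{n,
                         z * stride_d + filter_t,
                         p * stride_h + filter_r,
                         q * stride_w + filter_s,
                         c};
}

} // namespace conv3d_wgrad_activation_example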
| 12,024 | C | 36.578125 | 115 | 0.65311 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_analytic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile)
matrix from memory.
This iterator assumes TensorNHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
conv::StrideSupport StrideSupport_ = conv::StrideSupport::kStrided,
typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradOutputGradientTileAccessIteratorAnalytic;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradOutputGradientTileAccessIteratorAnalytic strided dgrad needs special handling using
// unscaled coordinates
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kStrided,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or greater.");
//
  // Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams;
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_k_;
int filter_r_;
int filter_s_;
int start_r_;
int start_s_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_p_[ThreadMap::Iterations::kStrided];
int offset_q_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_k_(0),
filter_r_(start_r),
filter_s_(start_s),
start_r_(start_r),
start_s_(start_s) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
int filter_r = filter_r_;
int filter_s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
filter_r = (problem_size_.R - 1 - filter_r);
filter_s = (problem_size_.S - 1 - filter_s);
}
// Starting h, w positions for filter position in gemm_k=0
int start_h, start_w;
strided_dgrad_starting_coords(
problem_size_,
stride_h_divmod, stride_w_divmod,
filter_r, filter_s,
start_h, start_w);
// Effective P and Q for filter position required for remapping NHW rows
int P = (problem_size_.H - start_h + problem_size_.stride_h - 1) / problem_size_.stride_h;
int Q = (problem_size_.W - start_w + problem_size_.stride_w - 1) / problem_size_.stride_w;
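    // For example (illustrative values only): with H = 8, stride_h = 2 and
    // start_h = 1, the rows h = 1, 3, 5, 7 belong to this filter position, so
    // P = (8 - 1 + 2 - 1) / 2 = 4.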
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_npq = (threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided) % params_.tiled_rows_per_filter;
// (STEP 1) [reorder NHW rows to start with same filter positions]
offset_n_[s] = offset_npq / (P * Q);
int residual = offset_npq % (P * Q);
int p = (residual / Q);
int q = (residual % Q);
int mapped_h = (start_h + p * problem_size_.stride_h);
int mapped_w = (start_w + q * problem_size_.stride_w);
// Access (p, q) coordinates for Dy tensor and a filter position in gemm_k=0
// note that (h + pad_h - filter_r) and (w + pad_w - filter_s) are divisible
// by stride_h and stride_w
offset_p_[s] = (mapped_h + problem_size_.pad_h - filter_r) / problem_size_.stride_h;
offset_q_[s] = (mapped_w + problem_size_.pad_w - filter_s) / problem_size_.stride_w;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size,
layout,
sizeof_bits<Element>::value,
{Shape::kRow, Shape::kColumn});
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// Move filter_s by stride_w
filter_s_ += problem_size_.stride_w;
if (filter_s_ < problem_size_.S) {
return;
}
// Restore filter_s
filter_s_ = start_s_;
// Move filter_r by stride_h
filter_r_ += problem_size_.stride_h;
if (filter_r_ < problem_size_.R) {
return;
}
// Restore filter_r
filter_r_ = start_r_;
// Move filter_k
filter_k_ += Shape_::kColumn * problem_size_.split_k_slices;
}
/// Returns the coordinate in the output tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int n = offset_n_[iteration_strided_];
int p = offset_p_[iteration_strided_];
int q = offset_q_[iteration_strided_];
int conv_sign = (problem_size_.mode == Mode::kConvolution ? 1 : -1);
p += (conv_sign * (filter_r_ / problem_size_.stride_h));
q += (conv_sign * (filter_s_ / problem_size_.stride_w));
int k = filter_k_ + iteration_vector_ * AccessType::kElements;
return TensorCoord(
n,
p,
q,
k);
}
/// Returns true if the current coordinate is within the output tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return
coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.P &&
coord.w() >= 0 && coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2dDgradOutputGradientTileAccessIteratorAnalytic for unity strides can be optimized by
// eliminating modulo arithmetic to compute unscaled coordinates
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorAnalytic <
Shape_,
Element_,
ThreadMap_,
conv::StrideSupport::kUnity,
AccessType_
> {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNHWC;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
static int const kConvDim = 2;
using ConvProblemSize = typename conv::Conv2dProblemSize;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static_assert(sizeof_bits<Element>::value >= 8,
"DGRAD requires elements of size 8b or greater.");
//
  // Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
struct Params {
Layout layout;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(
Conv2dProblemSize const &problem_size,
Layout const &layout
): layout(layout) {
}
};
private:
Params const ¶ms_;
Conv2dProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
LongIndex iteration_vector_;
char const *pointer_;
int filter_k_;
int filter_r_;
int filter_s_;
int offset_n_[ThreadMap::Iterations::kStrided];
int offset_w_[ThreadMap::Iterations::kStrided];
int offset_h_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorAnalytic(
Params const ¶ms,
Conv2dProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_k_(0),
filter_r_(0),
filter_s_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_k_ = threadblock_offset.column() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
int offset_nhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
offset_n_[s] = offset_nhw / (problem_size_.H * problem_size_.W);
int residual = offset_nhw % (problem_size_.H * problem_size_.W);
offset_h_[s] = residual / problem_size_.W;
offset_w_[s] = residual % problem_size_.W;
}
}
CUTLASS_HOST_DEVICE
static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) {
return Params(problem_size, layout);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_HOST_DEVICE
void advance() {
// move to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
filter_k_ += Shape_::kColumn * problem_size_.split_k_slices;
}
/// Returns the coordinate in the output tensor Dy that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int n = offset_n_[iteration_strided_];
int h = offset_h_[iteration_strided_];
int w = offset_w_[iteration_strided_];
int r = filter_r_;
int s = filter_s_;
if (problem_size_.mode == Mode::kConvolution) {
r = (problem_size_.R - 1 - r);
s = (problem_size_.S - 1 - s);
}
int p = (h + problem_size_.pad_h - r * problem_size_.dilation_h) / problem_size_.stride_h;
int q = (w + problem_size_.pad_w - s * problem_size_.dilation_w) / problem_size_.stride_w;
int k = filter_k_ + iteration_vector_ * AccessType::kElements;
return TensorCoord(n, p, q, k);
}
/// Returns true if the current coordinate is within the output tensor Dy
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
return coord.n() < problem_size_.N &&
coord.h() >= 0 && coord.h() < problem_size_.P &&
coord.w() >= 0 && coord.w() < problem_size_.Q &&
coord.c() < problem_size_.K;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv2dDgradOutputGradientTileAccessIteratorAnalytic &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(Conv2dProblemSize const &problem_size) {
    // The Conv2dDgradOutputGradientTileAccessIteratorAnalytic unity-stride specialization
// only supports (stride_h, stride_w) = (1, 1)
if (problem_size.stride() != MatrixCoord({1, 1})) {
return Status::kErrorNotSupported;
}
// check alignment constraint on iterator's contiguous dimension
if (problem_size.K % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
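// Illustrative sketch (not part of CUTLASS): the activation -> output-gradient
// coordinate mapping used by the unity-stride specialization above, written
// out in plain C++. Names are local to this example.
namespace conv2d_dgrad_unity_stride_example {

struct OutputGradientCoord { int n, p, q, k; };

// Maps a Dx coordinate (n, h, w) plus a filter position (r, s) and filter
// channel k to the Dy coordinate the iterator would access. The filter
// position is assumed to be already flipped for Mode::kConvolution, and
// stride_h == stride_w == 1, so no division or remainder check is required.
inline OutputGradientCoord output_gradient_coord(
    int n, int h, int w, int k,
    int r, int s,
    int pad_h, int pad_w,
    int dilation_h, int dilation_w) {

  int p = h + pad_h - r * dilation_h;
  int q = w + pad_w - s * dilation_w;

  return OutputGradientCoord{n, p, q, k};
}

} // namespace conv2d_dgrad_unity_stride_example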
| 18,940 | C | 30.204283 | 140 | 0.652798 |