// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/device/implicit_gemm_convolution_fusion.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Template for device-level fused scale+bias+relu activation and Implicit GEMM Convolution
*/
#pragma once
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/conv/convolution.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename ImplicitGemmFusionKernel_>
class ImplicitGemmConvolutionFusion {
public:
using ImplicitGemmFusionKernel = ImplicitGemmFusionKernel_;
using ElementA = typename ImplicitGemmFusionKernel::ElementA;
using LayoutA = typename ImplicitGemmFusionKernel::LayoutA;
using ElementB = typename ImplicitGemmFusionKernel::ElementB;
using LayoutB = typename ImplicitGemmFusionKernel::LayoutB;
// using ElementScaleBias = typename ImplicitGemmFusionKernel::ElementScaleBias;
// using LayoutScaleBias = typename ImplicitGemmFusionKernel::LayoutScaleBias;
using ElementC = typename ImplicitGemmFusionKernel::ElementC;
using LayoutC = typename ImplicitGemmFusionKernel::LayoutC;
using ElementAccumulator = typename ImplicitGemmFusionKernel::ElementAccumulator;
using ElementCompute = typename ImplicitGemmFusionKernel::ElementCompute;
using OperatorClass = typename ImplicitGemmFusionKernel::OperatorClass;
using ArchTag = typename ImplicitGemmFusionKernel::ArchTag;
using ThreadblockShape = typename ImplicitGemmFusionKernel::ThreadblockShape;
using WarpShape = typename ImplicitGemmFusionKernel::WarpShape;
using InstructionShape = typename ImplicitGemmFusionKernel::InstructionShape;
using ThreadblockSwizzle = typename ImplicitGemmFusionKernel::ThreadblockSwizzle;
using EpilogueOutputOp = typename ImplicitGemmFusionKernel::EpilogueOutputOp;
static int const kStages = ImplicitGemmFusionKernel::kStages;
static int const kConvDim = ImplicitGemmFusionKernel::kConvDim;
using WarpMmaOperator = typename ImplicitGemmFusionKernel::WarpMmaOperator;
using ArchMmaOperator = typename ImplicitGemmFusionKernel::ArchMmaOperator;
using MathOperator = typename ImplicitGemmFusionKernel::MathOperator;
static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemmFusionKernel::kConvolutionalOperator;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = ImplicitGemmFusionKernel::kIteratorAlgorithm;
static int const kWarpCount =
(ThreadblockShape::kM / WarpShape::kM) *
(ThreadblockShape::kN / WarpShape::kN) *
(ThreadblockShape::kK / WarpShape::kK);
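// Example: a 128x128x32 threadblock tile with 64x64x32 warp tiles gives
// (128/64) * (128/64) * (32/32) = 4 warps, i.e. run() launches 128-thread blocks.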
/// Argument structure
using Arguments = typename ImplicitGemmFusionKernel::Arguments;
private:
/// Kernel parameters object
typename ImplicitGemmFusionKernel::Params params_;
public:
/// Constructs Implicit GEMM
ImplicitGemmConvolutionFusion() { }
/// Determines whether the Implicit GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
// dispatch to iterators
Status status = ImplicitGemmFusionKernel::Mma::IteratorA::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
status = ImplicitGemmFusionKernel::Mma::IteratorB::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(
threadblock_swizzle.get_tiled_shape(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size),
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices));
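// CUDA limits gridDim.y and gridDim.z to 65535, so reject problems whose swizzled
// grid exceeds that range.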
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max())) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t workspace_bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size),
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices);
if(args.split_k_mode == SplitKMode::kParallel) {
// Split-K parallel: CTAs in k-dimension write the partial results in a temporary workspace.
// The user needs to call a reduction operator to obtain the final output tensor
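// For example, with split_k_slices = 4 the workspace holds four output-sized partial
// accumulator tensors (one per k-slice) that a separate reduction pass combines.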
workspace_bytes =
sizeof(ElementAccumulator) *
size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size)) *
size_t(grid_tiled_shape.k());
}
else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size.split_k_slices > 1) {
// Split-K serial: The user workspace is used to store semaphore and serialize writing the
// final reduced output to user's output tensor
workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n());
}
return workspace_bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
if (args.problem_size.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream);
if (status != cudaSuccess) {
return Status::kErrorInternal;
}
}
// initialize the params structure from the arguments
params_ = typename ImplicitGemmFusionKernel::Params(
args,
static_cast<int *>(workspace)
);
int smem_size = int(sizeof(typename ImplicitGemmFusionKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<ImplicitGemmFusionKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Initializes Implicit GEMM state from arguments.
Status update(Arguments const &args, void *workspace = nullptr) {
// update the params structure from the arguments
params_.ptr_A = args.ref_A.data();
params_.ptr_B = args.ref_B.data();
params_.ptr_scale = args.ref_A_scale.data();
params_.ptr_bias = args.ref_A_bias.data();
params_.ptr_C = args.ref_C.data();
params_.ptr_D = args.ref_D.data();
params_.output_op = args.output_op;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(32 * kWarpCount, 1, 1);
int smem_size = int(sizeof(typename ImplicitGemmFusionKernel::SharedStorage));
cutlass::Kernel<ImplicitGemmFusionKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
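/*
  Usage sketch (illustrative only). `FusionKernel` stands for a kernel type composed elsewhere
  (for example by one of the DefaultConv2d*Fusion traits); it is not defined in this header, and
  `workspace_ptr` and the Arguments constructor arguments are schematic placeholders.

    using ConvFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<FusionKernel>;

    ConvFusion conv_op;
    typename ConvFusion::Arguments args(...);   // problem size, tensor refs, scale/bias refs, epilogue params

    cutlass::Status status = ConvFusion::can_implement(args);
    size_t workspace_bytes = ConvFusion::get_workspace_size(args);

    // Allocate `workspace_bytes` of device memory if non-zero, then:
    status = conv_op.initialize(args, workspace_ptr, stream);
    status = conv_op(stream);                   // equivalently: conv_op.run(stream)
*/
/////////////////////////////////////////////////////////////////////////////////////////////////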
// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_depthwise_fprop.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level Depthwise implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/kernel/direct_convolution.h"
#include "cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/depthwise_fprop_pipelined.h"
// Direct Conv Related Header files
#include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_optimized.h"
#include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h"
#include "cutlass/conv/threadblock/depthwise_fprop_filter_tile_access_iterator_direct_conv_optimized.h"
#include "cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for DepthwiseFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
conv::StrideSupport StrideSupport = StrideSupport::kStrided,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = cutlass::sizeof_bits<ElementB>::value / cutlass::sizeof_bits<ElementB>::value
> struct DefaultDepthwiseFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for DepthwiseFprop with direct convolution algorithm
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
conv::StrideSupport StrideSupport = StrideSupport::kStrided,
// MatrixShape<Height, Width>
typename StrideShape = cutlass::MatrixShape<-1, -1>,
// MatrixShape< Height, Width>
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultDepthwiseDirect2dConvFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for Analytic IteratorAlgorithm
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag, // cutlass::arch::OpMultiplyAdd
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseMmaCoreWithLaneAccessSize<
ThreadblockShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
sizeof_bits<ElementB>::value,
2,
MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB,
cutlass::conv::GroupMode::kDepthwise
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::DepthwiseFpropPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for direct 2d conv implementation,
/// multiple stage pipeline, and SIMT-based mainloop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
typename StrideShape,
typename DilationShape,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseDirect2dConvFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
StrideShape,
DilationShape,
AlignmentA,
AlignmentB
> {
// One warp handles the entire set of groups per CTA.
static_assert(ThreadblockShape::kN == WarpShape::kN,
"ThreadblockShape::kN should be same as WarpShape::kN ");
static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount,
"ThreadblockShape::kK and WarpShape::kK should be same as filter size");
static_assert(ThreadblockShape::kM % WarpShape::kM == 0,
"ThreadblockShape::kM must be divisible by WarpShape::kM");
static_assert(ThreadBlockOutputShape::kN, "ThreadBlockOutputShape::kN should be 1");
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize<
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
128,
Stages,
MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta>
ThreadBlockOutputShape,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
using ThreadOutputShape = typename MmaCore::ThreadOutputShape;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * AlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt<
ThreadblockShape, // < outputShape:KMNK, groups per cta>
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
ThreadOutputShape,
ThreadBlockOutputShape
>::Epilogue;
// Define the Mma
using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
CacheOpA,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages,
Epilogue
>;
// Define the kernel
using Kernel = cutlass::conv::kernel::DirectConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise,
ThreadBlockOutputShape
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Depthwise specialization for direct 2d conv implementation,
/// multiple stage pipeline, and SIMT-based mainloop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename ThreadBlockOutputShape,
typename FilterShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
typename StrideShape,
typename DilationShape,
int AlignmentA,
int AlignmentB
>
struct DefaultDepthwiseDirect2dConvFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFixedStrideDilation,
StrideSupport,
StrideShape,
DilationShape,
AlignmentA,
AlignmentB
> {
// One warp handles the entire set of groups per CTA.
static_assert(ThreadblockShape::kN == WarpShape::kN,
"ThreadblockShape::kN should be same as WarpShape::kN ");
static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount,
"ThreadblockShape::kK and WarpShape::kK should be same as filter size");
static_assert(ThreadblockShape::kM % WarpShape::kM == 0,
"ThreadblockShape::kM must be divisible by WarpShape::kM");
static_assert(ThreadBlockOutputShape::kN, "ThreadBlockOutputShape::kN should be 1");
static_assert(StrideShape::kRow >= 0 && StrideShape::kColumn >= 0, "Stride should be fixed");
static_assert(DilationShape::kRow >= 0 && DilationShape::kColumn >= 0, "Dilation should be fixed");
// Activations loaded by threadblock
static int const ActivationShapeH = (ThreadBlockOutputShape::kH - 1) * StrideShape::kRow +
(FilterShape::kRow - 1) * DilationShape::kRow + 1;
static int const ActivationShapeW = (ThreadBlockOutputShape::kW - 1) * StrideShape::kColumn +
(FilterShape::kColumn - 1) * DilationShape::kColumn + 1;
using ActivationShape =
cutlass::conv::TensorNHWCShape<1, ActivationShapeH, ActivationShapeW, ThreadblockShape::kN >;
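// For example, an 8x8 output tile with a 3x3 filter, stride 1x1 and dilation 1x1 needs
// (8 - 1) * 1 + (3 - 1) * 1 + 1 = 10 rows and columns of activations, i.e. a 10x10
// input patch per threadblock (times ThreadblockShape::kN channels).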
// Define the core components from GEMM
using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize<
ThreadblockShape,
ThreadBlockOutputShape,
FilterShape,
WarpShape,
InstructionShape,
ElementA,
layout::RowMajor,
ElementB,
layout::ColumnMajor,
ElementAccumulator,
layout::RowMajor,
arch::OpClassSimt,
128,
128,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape,
DilationShape,
ActivationShape>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation<
cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta>
ThreadBlockOutputShape,
StrideShape,
DilationShape,
ActivationShape,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
using ThreadOutputShape = typename MmaCore::ThreadOutputShape;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * AlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt<
ThreadblockShape, // < outputShape:KMNK, groups per cta>
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
ThreadOutputShape,
ThreadBlockOutputShape
>::Epilogue;
// Define the Mma
using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
CacheOpA,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages,
Epilogue,
IteratorAlgorithm::kFixedStrideDilation
>;
// Define the kernel
using Kernel = cutlass::conv::kernel::DirectConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv2dProblemSize,
cutlass::conv::GroupMode::kDepthwise,
ThreadBlockOutputShape
>;
};
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
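/*
  Instantiation sketch (illustrative only). The concrete element types, tile shapes, epilogue and
  swizzle below are placeholders chosen for exposition; they are not asserted to be a tuned or
  supported configuration. They only show how the two-stage analytic specialization above is
  composed into a kernel type.

    using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;

    using DepthwiseFpropKernel = cutlass::conv::kernel::DefaultDepthwiseFprop<
        float, cutlass::layout::TensorNHWC,                       // ElementA / LayoutA (activations)
        float, cutlass::layout::TensorNHWC,                       // ElementB / LayoutB (filter)
        float, cutlass::layout::TensorNHWC,                       // ElementC / LayoutC (output)
        float,                                                    // ElementAccumulator
        cutlass::arch::OpClassSimt,
        cutlass::arch::Sm60,
        cutlass::gemm::GemmShape<64, 64, 8>,                      // threadblock tile
        cutlass::gemm::GemmShape<32, 32, 8>,                      // warp tile
        cutlass::gemm::GemmShape<1, 1, 1>,                        // instruction shape (SIMT)
        EpilogueOp,
        cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
        2,                                                        // stages
        cutlass::arch::OpMultiplyAdd
    >::Kernel;

  The resulting Kernel type is then wrapped by a device-level launcher such as
  cutlass::conv::device::ImplicitGemmConvolution.
*/
/////////////////////////////////////////////////////////////////////////////////////////////////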
// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_with_fused_epilogue.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined Implicit GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/semaphore.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ = Conv2dProblemSize ///! Convolutional operator on 2D or 3D problem
>
struct ImplicitGemmConvolutionWithFusedEpilogue {
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static Operator const kConvolutionalOperator = ConvOperator;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename EpilogueOutputOp::ElementOutput;
/// Set output tensor C layout
using LayoutC = LayoutA;
using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using WarpMmaOperator = typename Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename ArchMmaOperator::Operator;
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename WarpMmaOperator::Shape;
using InstructionShape = typename ArchMmaOperator::Shape;
static int const kStages = Mma::kStages;
static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using TensorRefA = typename Mma::IteratorA::TensorRef;
using TensorRefB = typename Mma::IteratorB::TensorRef;
using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>;
/// Check iterator A and B convolution dimension are the same and
// set device::ImplicitGemmConvolution::kConvDim
static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim,
"Convolution on different dimensions is not supported");
static int const kConvDim = Mma::IteratorA::kConvDim;
/// Conv dimension and problem size structure (Conv2d or Conv3d)
using ConvProblemSize = ConvProblemSize_;
static conv::GroupMode const kGroupMode = conv::GroupMode::kNone;
/// Wgrad C stride idx for implicit gemm algorithm
// Conv2d row-major matrix C (KxRSC)
// Conv3d row-major matrix C (KxTRSC)
static int const kWgradCStrideIdx =
platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3;
/// This chooses the appropriate stride element of the C tensor.
static int const kTensorCStrideIdx =
(kConvolutionalOperator == conv::Operator::kWgrad ? kWgradCStrideIdx : 0);
//
//
//
using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter<
LayoutC,
typename Epilogue::OutputTileIterator::Layout,
TensorRefC,
ConvOperator,
ConvProblemSize
>;
/// Argument structure
struct Arguments {
//
// Data members
//
ConvProblemSize problem_size;
TensorRefA ref_A;
TensorRefB ref_B;
TensorRefC ref_C;
TensorRefC ref_D;
typename EpilogueOutputOp::Params output_op;
SplitKMode split_k_mode;
void * ptr_Vector;
void * ptr_Tensor;
typename LayoutC::Stride::Index ldr;
typename LayoutC::Stride::Index ldt;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size
):
problem_size(problem_size) { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size,
TensorRefA const & ref_A,
TensorRefB const & ref_B,
TensorRefC const & ref_C,
TensorRefC const & ref_D,
typename EpilogueOutputOp::Params const & output_op,
SplitKMode const & split_k_mode = SplitKMode::kSerial,
void * ptr_Vector = nullptr,
void * ptr_Tensor = nullptr,
typename LayoutC::Stride::Index ldr = 0,
typename LayoutC::Stride::Index ldt = 0
):
problem_size(problem_size),
ref_A(ref_A),
ref_B(ref_B),
ref_C(ref_C),
ref_D(ref_D),
output_op(output_op),
split_k_mode(split_k_mode),
ptr_Vector(ptr_Vector),
ptr_Tensor(ptr_Tensor),
ldr(ldr),
ldt(ldt)
{
}
};
/// Parameters structure
struct Params {
ConvProblemSize problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
gemm::GemmCoord implicit_gemm_problem_size;
int swizzle_log_tile;
int gemm_k_iterations;
typename Mma::IteratorA::Params iterator_A;
typename Mma::IteratorA::Element const *ptr_A;
typename Mma::IteratorB::Params iterator_B;
typename Mma::IteratorB::Element const *ptr_B;
typename Epilogue::OutputTileIterator::Params iterator_C;
typename Epilogue::OutputTileIterator::Element *ptr_C;
typename Epilogue::OutputTileIterator::Params iterator_D;
typename Epilogue::OutputTileIterator::Element *ptr_D;
typename EpilogueOutputOp::Params output_op;
int *semaphore;
SplitKMode split_k_mode;
typename Epilogue::TensorTileIterator::Params params_Tensor;
void * ptr_Vector;
typename LayoutC::Stride::Index ldr;
void * ptr_Tensor;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
gemm_k_iterations(0),
ptr_Vector(nullptr),
ldr(0),
ptr_Tensor(nullptr)
{ }
///
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
int *semaphore = nullptr
):
problem_size(args.problem_size),
implicit_gemm_problem_size(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)),
iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())),
ptr_A(args.ref_A.data()),
iterator_B(args.problem_size, args.ref_B.layout()),
ptr_B(args.ref_B.data()),
iterator_C(ConvOutputIteratorParameter::layout(args.ref_C)),
ptr_C(args.ref_C.data()),
iterator_D(ConvOutputIteratorParameter::layout(args.ref_D)),
ptr_D(args.ref_D.data()),
output_op(args.output_op),
semaphore(semaphore),
split_k_mode(args.split_k_mode),
params_Tensor(args.ldt),
ptr_Vector(args.ptr_Vector),
ldr(args.ldr),
ptr_Tensor(args.ptr_Tensor)
{
gemm_k_iterations = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size);
ThreadblockSwizzle threadblock_swizzle;
grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
implicit_gemm_problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices);
swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
ImplicitGemmConvolutionWithFusedEpilogue() { }
/// Executes one ImplicitGEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) {
return;
}
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.iterator_A,
params.problem_size,
params.ptr_A,
thread_idx,
MatrixCoord(
threadblock_tile_idx.m() * Mma::Shape::kM,
threadblock_tile_idx.k() * Mma::Shape::kK
)
);
typename Mma::IteratorB iterator_B(
params.iterator_B,
params.problem_size,
params.ptr_B,
thread_idx,
MatrixCoord(
threadblock_tile_idx.k() * Mma::Shape::kK,
threadblock_tile_idx.n() * Mma::Shape::kN
)
);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
// Construct the semaphore.
int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m();
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// Compute logical position within grid
threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// If performing a reduction via split-K, fetch the initial synchronization
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k());
}
MatrixCoord threadblock_offset(
threadblock_tile_idx.m() * Mma::Shape::kM,
threadblock_tile_idx.n() * Mma::Shape::kN
);
// Tile iterator writing to destination tensor
typename Epilogue::OutputTileIterator iterator_D(
params.iterator_D,
params.ptr_D,
ConvOutputIteratorParameter::extent(params.problem_size),
thread_idx,
threadblock_offset
);
// Tile iterator reading from source accumulator tensor
typename Epilogue::OutputTileIterator iterator_C(
params.iterator_C,
params.ptr_C,
ConvOutputIteratorParameter::extent(params.problem_size),
thread_idx,
threadblock_offset
);
typename Epilogue::ElementTensor *ptr_Tensor =
static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
// Define the reduction output pointer and move to the appropriate place
typename Epilogue::ElementVector *ptr_Vector =
static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
// Additional tensor to load from
typename Epilogue::TensorTileIterator tensor_iterator(
params.params_Tensor,
// Only the final block outputs Tensor
((params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_idx.k() + 1))
? nullptr
: ptr_Tensor,
ConvOutputIteratorParameter::extent(params.problem_size),
thread_idx,
threadblock_offset);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Move to appropriate location for this output tile
if (ptr_Vector) {
ptr_Vector += threadblock_offset.column() + threadblock_tile_idx.m() * params.ldr;
}
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_idx.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_idx.k());
}
// Each split-k-slice writes to a unique tensor location
else if (params.split_k_mode == SplitKMode::kParallel) {
iterator_D.add_pointer_offset(threadblock_tile_idx.k() *
cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size));
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op,
// Only the final block uses Vector
((params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) &&
(params.grid_tiled_shape.k() != threadblock_tile_idx.k() + 1))
? nullptr
: ptr_Vector,
iterator_D,
accumulators,
iterator_C,
tensor_iterator,
ConvOutputIteratorParameter::extent(params.problem_size),
threadblock_offset);
//
// Release the semaphore
//
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_idx.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
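/*
  Launch sketch (illustrative only). A device-level wrapper normally performs these steps; the
  fragment below mirrors the pattern used by ImplicitGemmConvolutionFusion::run() earlier in this
  library. `Kernel` stands for a concrete ImplicitGemmConvolutionWithFusedEpilogue instantiation,
  and names such as `semaphore_workspace`, `ref_A`, and `output_op_params` are placeholders.

    Kernel::Arguments args(problem_size, ref_A, ref_B, ref_C, ref_D,
                           output_op_params, split_k_mode,
                           ptr_Vector, ptr_Tensor, ldr, ldt);
    Kernel::Params params(args, semaphore_workspace);

    Kernel::ThreadblockSwizzle swizzle;
    dim3 grid  = swizzle.get_grid_shape(params.grid_tiled_shape);
    dim3 block(Kernel::kThreadCount, 1, 1);
    int  smem  = int(sizeof(typename Kernel::SharedStorage));

    cutlass::Kernel<Kernel><<<grid, block, smem, stream>>>(params);
*/
/////////////////////////////////////////////////////////////////////////////////////////////////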
// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default threadblock-scoped epilogue definitions shared by the kernel-level implicit GEMM convolution specializations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/conv/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/threadblock/conv2d_tile_iterator.h"
#include "cutlass/conv/threadblock/implicit_gemm_pipelined.h"
#include "cutlass/conv/threadblock/implicit_gemm_multistage.h"
#include "cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h"
#include "cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_fusion.h"
#include "cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogue {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogue<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastTensorOp {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename ElementTensor,
typename ElementVector,
typename OutputOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithBroadcastTensorOp<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
ElementTensor,
ElementVector,
OutputOp,
ElementsPerAccess
>::Epilogue;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename OutputOp,
typename ReductionOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithReductionTensorOp {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename ElementOutput,
typename OutputOp,
typename ReductionOp,
int ElementsPerAccess
>
struct DefaultConvEpilogueWithReductionTensorOp<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionVoltaTensorOp<
Shape,
WarpMmaTensorOp,
PartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
ElementsPerAccess
>::Epilogue;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Defaults for strided Dgrad
template <
typename ArchTag,
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogueStridedDgrad {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
template <
typename Shape,
typename WarpMmaTensorOp,
int PartitionsK,
typename OutputOp
>
struct DefaultConvEpilogueStridedDgrad<
arch::Sm70,
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp
> {
using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOpStridedDgrad<
Shape,
WarpMmaTensorOp,
PartitionsK,
OutputOp,
OutputOp::kCount
>::Epilogue;
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
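/*
  Usage sketch (illustrative only). The Default* kernel traits in this namespace select an
  epilogue through these helpers so that Volta (Sm70) picks the Volta tensor-op epilogue while
  other tensor-op architectures share the generic one:

    using Epilogue = typename detail::DefaultConvEpilogue<
        ArchTag,              // e.g. cutlass::arch::Sm80 or cutlass::arch::Sm70
        ThreadblockShape,
        WarpMmaTensorOp,
        kPartitionsK,
        EpilogueOutputOp
    >::Epilogue;

  ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK and EpilogueOutputOp are placeholders
  for the types and constants established by the enclosing kernel trait.
*/
/////////////////////////////////////////////////////////////////////////////////////////////////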
// File: NVIDIA/warp/warp/native/cutlass/include/cutlass/platform/platform.h
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/**
* \file
* \brief C++ features that may be otherwise unimplemented for CUDA device functions.
*
 * This file has four components:
*
* (1) Macros:
* - Empty macro defines for C++ keywords not supported by the current
* version of C++. These simply allow compilation to proceed (but do
* not provide the added semantics).
* - \p noexcept
* - \p constexpr
* - \p nullptr
* - \p static_assert
*
* - Macro functions that we need in constant expressions because the
* C++ equivalents require constexpr compiler support. These are
* prefixed with \p __NV_STD_*
* - \p __NV_STD_MAX
* - \p __NV_STD_MIN
*
* (2) Re-implementations of STL functions and types:
* - C++ features that need the \p __device__ annotation. These are
* placed into the \p platform namespace.
* - \p abs
* - \p plus
* - \p less
* - \p greater
* - \p min
* - \p max
* - \p methods on std::pair (==, !=, <, <=, >, >=, and make_pair())
*
* (3) Stop-gap implementations of unsupported STL functions and types:
* - STL functions and types defined by C++ 11/14/17/etc. that are not
* provided by the current version of C++. These are placed into the
* \p platform namespace
* - \p integral_constant
* - \p nullptr_t
* - \p true_type
* - \p false_type
* - \p bool_constant
* - \p enable_if
* - \p conditional
* - \p is_same
* - \p is_base_of
* - \p remove_const
* - \p remove_volatile
* - \p remove_cv
* - \p is_volatile
* - \p is_pointer
* - \p is_void
* - \p is_integral
* - \p is_floating_point
* - \p is_arithmetic
* - \p is_fundamental
* - \p is_trivially_copyable
* - \p alignment_of
* - \p aligned_storage
*
* (4) Functions and types that are STL-like (but aren't in the STL):
* - \p TODO: min and max functors?
*
* The idea is that, as we drop support for older compilers, we can simply #define
* the \p __NV_STD_XYZ macros and \p platform namespace to alias their C++
* counterparts (or trivially find-and-replace their occurrences in code text).
*/
//-----------------------------------------------------------------------------
// Dependencies
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <stdint.h>
#endif
#if !defined(__CUDACC_RTC__)
//-----------------------------------------------------------------------------
// Include STL files that platform provides functionality for
//-----------------------------------------------------------------------------
#include <algorithm> // Minimum/maximum operations
#include <cstddef> // nullptr_t
#include <functional> // Arithmetic operations
#include <utility> // For methods on std::pair
#if (!defined(_MSC_VER) && (__cplusplus >= 201103L)) || (defined(_MSC_VER) && (_MSC_VER >= 1500))
#include <type_traits> // For integral constants, conditional metaprogramming, and type traits
#endif
#include "cutlass/cutlass.h"
#endif
//-----------------------------------------------------------------------------
// OS
//-----------------------------------------------------------------------------
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__)
#define CUTLASS_OS_WINDOWS
#endif
/******************************************************************************
* Macros
******************************************************************************/
//-----------------------------------------------------------------------------
// Keywords
//-----------------------------------------------------------------------------
/// noexcept, constexpr
#if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1900))
#ifndef noexcept
#define noexcept
#endif
#ifndef constexpr
#define constexpr
#endif
#endif
/// nullptr
#if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1310))
#ifndef nullptr
#define nullptr 0
#endif
#endif
/// static_assert
#if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1600))
#ifndef static_assert
#define __platform_cat_(a, b) a##b
#define __platform_cat(a, b) __platform_cat_(a, b)
#define static_assert(__e, __m) typedef int __platform_cat(AsSeRt, __LINE__)[(__e) ? 1 : -1]
#endif
#endif
//-----------------------------------------------------------------------------
// Functions
//-----------------------------------------------------------------------------
/// Select maximum(a, b)
#ifndef __NV_STD_MAX
#define __NV_STD_MAX(a, b) (((b) > (a)) ? (b) : (a))
#endif
/// Select minimum(a, b)
#ifndef __NV_STD_MIN
#define __NV_STD_MIN(a, b) (((b) < (a)) ? (b) : (a))
#endif
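// For example, these macros can size arrays in contexts that must be integral constant
// expressions even without constexpr support:
//
//   int buffer[__NV_STD_MAX(kSizeA, kSizeB)];   // kSizeA/kSizeB: illustrative constants
//
// Note that, as with any function-style macro, the arguments may be evaluated more than once.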
/******************************************************************************
* Re-implementations
******************************************************************************/
namespace cutlass {
namespace platform {
//-----------------------------------------------------------------------------
// Abs operations <algorithm>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__)
/// std::abs
CUTLASS_HOST_DEVICE constexpr int abs(int a) {
return (a < 0) ? -a : a;
}
CUTLASS_HOST_DEVICE constexpr long long abs(long long a) {
return (a < 0) ? -a : a;
}
#else
using std::abs;
#endif
//-----------------------------------------------------------------------------
// Minimum/maximum operations <algorithm>
//-----------------------------------------------------------------------------
/// std::min
template <typename T>
CUTLASS_HOST_DEVICE constexpr const T& min(const T& a, const T& b) {
return (b < a) ? b : a;
}
/// std::max
template <typename T>
CUTLASS_HOST_DEVICE constexpr const T& max(const T& a, const T& b) {
return (a < b) ? b : a;
}
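// Usage sketch (illustrative; values are arbitrary): these helpers mirror std::min/std::max
// and are callable from both host and device code, e.g.
//
//   int lo = cutlass::platform::min(3, 7);   // lo == 3
//   int hi = cutlass::platform::max(3, 7);   // hi == 7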
#if !defined(__CUDACC_RTC__)
//-----------------------------------------------------------------------------
// Methods on std::pair
//-----------------------------------------------------------------------------
using std::pair;
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator==(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return (lhs.first == rhs.first) && (lhs.second == rhs.second);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator!=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
  return (lhs.first != rhs.first) || (lhs.second != rhs.second);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator<(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return (lhs.first < rhs.first) ? true : (rhs.first < lhs.first) ? false
: (lhs.second < rhs.second);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator<=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return !(rhs < lhs);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator>(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return (rhs < lhs);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE constexpr bool operator>=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) {
return !(lhs < rhs);
}
template <class T1, class T2>
CUTLASS_HOST_DEVICE std::pair<T1, T2> make_pair(T1 t, T2 u) {
std::pair<T1, T2> retval;
retval.first = t;
retval.second = u;
return retval;
}
#endif
} // namespace platform
/******************************************************************************
* Implementations of C++ 11/14/17/... STL features
******************************************************************************/
namespace platform {
//-----------------------------------------------------------------------------
// Integral constant helper types <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::integral_constant
template <typename value_t, value_t V>
struct integral_constant;
/// std::integral_constant
template <typename value_t, value_t V>
struct integral_constant {
static const value_t value = V;
typedef value_t value_type;
typedef integral_constant<value_t, V> type;
CUTLASS_HOST_DEVICE operator value_type() const { return value; }
CUTLASS_HOST_DEVICE const value_type operator()() const { return value; }
};
#else
using std::integral_constant;
using std::pair;
#endif
/// The type used as a compile-time boolean with true value.
typedef integral_constant<bool, true> true_type;
/// The type used as a compile-time boolean with false value.
typedef integral_constant<bool, false> false_type;
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus <= 201402L)) || (defined(_MSC_VER) && (_MSC_VER < 1900))
/// std::bool_constant
template <bool V>
struct bool_constant : platform::integral_constant<bool, V> {};
#else
using std::bool_constant;
#endif
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1700))
/// std::nullptr_t
struct nullptr_t {};
#else
using std::nullptr_t;
#endif
//-----------------------------------------------------------------------------
// Conditional metaprogramming <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1600))
/// std::enable_if (true specialization)
template <bool C, typename T = void>
struct enable_if {
typedef T type;
};
/// std::enable_if (false specialization)
template <typename T>
struct enable_if<false, T> {};
/// std::conditional (true specialization)
template <bool B, class T, class F>
struct conditional {
typedef T type;
};
/// std::conditional (false specialization)
template <class T, class F>
struct conditional<false, T, F> {
typedef F type;
};
#else
using std::enable_if;
using std::conditional;
#endif
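// Usage sketch (illustrative; increment and Selected are hypothetical names): enable_if and
// conditional provide basic SFINAE and compile-time type selection when <type_traits> is
// unavailable, e.g.
//
//   template <typename T>
//   typename platform::enable_if<platform::is_integral<T>::value, T>::type
//   increment(T x) { return x + 1; }   // participates in overload resolution only for integral T
//
//   typedef platform::conditional<true, int, float>::type Selected;   // Selected is int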
//-----------------------------------------------------------------------------
// Const/volatility specifiers <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::remove_const (non-const specialization)
template <typename T>
struct remove_const {
typedef T type;
};
/// std::remove_const (const specialization)
template <typename T>
struct remove_const<const T> {
typedef T type;
};
/// std::remove_volatile (non-volatile specialization)
template <typename T>
struct remove_volatile {
typedef T type;
};
/// std::remove_volatile (volatile specialization)
template <typename T>
struct remove_volatile<volatile T> {
typedef T type;
};
/// std::remove_cv
template <typename T>
struct remove_cv {
typedef typename remove_volatile<typename remove_const<T>::type>::type type;
};
#else
using std::remove_const;
using std::remove_volatile;
using std::remove_cv;
#endif
//-----------------------------------------------------------------------------
// Type relationships <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::is_same (false specialization)
template <typename A, typename B>
struct is_same : false_type {};
/// std::is_same (true specialization)
template <typename A>
struct is_same<A, A> : true_type {};
/// Helper for std::is_base_of
template <typename BaseT, typename DerivedT>
struct is_base_of_helper {
typedef char (&yes)[1];
typedef char (&no)[2];
template <typename B, typename D>
struct dummy {
CUTLASS_HOST_DEVICE operator B*() const;
CUTLASS_HOST_DEVICE operator D*();
};
template <typename T>
CUTLASS_HOST_DEVICE static yes check(DerivedT*, T);
CUTLASS_HOST_DEVICE static no check(BaseT*, int);
static const bool value = sizeof(check(dummy<BaseT, DerivedT>(), int())) == sizeof(yes);
};
/// std::is_base_of
template <typename BaseT, typename DerivedT>
struct is_base_of
: integral_constant<bool,
(is_base_of_helper<typename remove_cv<BaseT>::type,
typename remove_cv<DerivedT>::type>::value) ||
(is_same<typename remove_cv<BaseT>::type,
typename remove_cv<DerivedT>::type>::value)> {};
#else
using std::is_same;
using std::is_base_of;
#endif
//-----------------------------------------------------------------------------
// Type properties <type_traits>
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::is_volatile
template <typename T>
struct is_volatile : false_type {};
template <typename T>
struct is_volatile<volatile T> : true_type {};
/// Helper for std::is_pointer (false specialization)
template <typename T>
struct is_pointer_helper : false_type {};
/// Helper for std::is_pointer (true specialization)
template <typename T>
struct is_pointer_helper<T*> : true_type {};
/// std::is_pointer
template <typename T>
struct is_pointer : is_pointer_helper<typename remove_cv<T>::type> {};
/// std::is_void
template <typename T>
struct is_void : is_same<void, typename remove_cv<T>::type> {};
/// std::is_integral
template <typename T>
struct is_integral : false_type {};
template <>
struct is_integral<char> : true_type {};
template <>
struct is_integral<signed char> : true_type {};
template <>
struct is_integral<unsigned char> : true_type {};
template <>
struct is_integral<short> : true_type {};
template <>
struct is_integral<unsigned short> : true_type {};
template <>
struct is_integral<int> : true_type {};
template <>
struct is_integral<unsigned int> : true_type {};
template <>
struct is_integral<long> : true_type {};
template <>
struct is_integral<unsigned long> : true_type {};
template <>
struct is_integral<long long> : true_type {};
template <>
struct is_integral<unsigned long long> : true_type {};
template <typename T>
struct is_integral<volatile T> : is_integral<T> {};
template <typename T>
struct is_integral<const T> : is_integral<T> {};
template <typename T>
struct is_integral<const volatile T> : is_integral<T> {};
/// std::is_floating_point
template <typename T>
struct is_floating_point
: integral_constant<bool,
(is_same<float, typename remove_cv<T>::type>::value ||
is_same<double, typename remove_cv<T>::type>::value)> {};
/// std::is_arithmetic
template <typename T>
struct is_arithmetic
: integral_constant<bool, (is_integral<T>::value || is_floating_point<T>::value)> {};
/// std::is_fundamental
template <typename T>
struct is_fundamental
: integral_constant<bool,
(is_arithmetic<T>::value || is_void<T>::value ||
is_same<nullptr_t, typename remove_cv<T>::type>::value)> {};
#else
using std::is_volatile;
using std::is_pointer;
using std::is_void;
using std::is_integral;
using std::is_floating_point;
using std::is_arithmetic;
using std::is_fundamental;
#endif
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1800)) || \
(defined(__GNUG__) && (__GNUC__ < 5))
/**
* std::is_trivially_copyable
*
* This implementation only evaluates true if T is fundamental or pointer
*
* Without help from partial template specializations provided by the user for
* a specific class or struct, this trait will never report that the specified
 * class or struct is trivially copyable; this is always safe,
* if possibly sub-optimal.
*/
template <typename T>
struct is_trivially_copyable
: integral_constant<bool, (is_fundamental<T>::value || is_pointer<T>::value)> {};
#else
using std::is_trivially_copyable;
#endif
//-----------------------------------------------------------------------------
// bit_cast <bit>
//-----------------------------------------------------------------------------
template< class To, class From >
constexpr To CUTLASS_HOST_DEVICE bit_cast(const From& from) noexcept;
template <class To, class From>
constexpr To CUTLASS_HOST_DEVICE bit_cast(const From& src) noexcept
{
static_assert(sizeof(To) == sizeof(From), "sizes must match");
return reinterpret_cast<To const &>(src);
}
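// Usage sketch (illustrative): bit_cast reinterprets the object representation of a value
// as another type of equal size, e.g.
//
//   uint32_t bits = platform::bit_cast<uint32_t>(1.0f);   // bits == 0x3f800000
//   float value   = platform::bit_cast<float>(bits);      // value == 1.0f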
//-----------------------------------------------------------------------------
// Alignment and layout utilities
//-----------------------------------------------------------------------------
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500))
/// std::alignment_of
template <typename value_t>
struct alignment_of {
struct pad {
value_t val;
char byte;
};
enum { value = sizeof(pad) - sizeof(value_t) };
};
#else
template <typename value_t>
struct alignment_of : std::alignment_of<value_t> {};
#endif
/* 16B specializations where 32-bit Win32 host compiler disagrees with device compiler */
template <>
struct alignment_of<int4> {
enum { value = 16 };
};
template <>
struct alignment_of<uint4> {
enum { value = 16 };
};
template <>
struct alignment_of<float4> {
enum { value = 16 };
};
template <>
struct alignment_of<long4> {
enum { value = 16 };
};
template <>
struct alignment_of<ulong4> {
enum { value = 16 };
};
template <>
struct alignment_of<longlong2> {
enum { value = 16 };
};
template <>
struct alignment_of<ulonglong2> {
enum { value = 16 };
};
template <>
struct alignment_of<double2> {
enum { value = 16 };
};
template <>
struct alignment_of<longlong4> {
enum { value = 16 };
};
template <>
struct alignment_of<ulonglong4> {
enum { value = 16 };
};
template <>
struct alignment_of<double4> {
enum { value = 16 };
};
// Specializations for volatile/const qualified types
template <typename value_t>
struct alignment_of<volatile value_t> : alignment_of<value_t> {};
template <typename value_t>
struct alignment_of<const value_t> : alignment_of<value_t> {};
template <typename value_t>
struct alignment_of<const volatile value_t> : alignment_of<value_t> {};
#if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1800))
template <size_t Align>
struct aligned_chunk;
template <>
struct __align__(1) aligned_chunk<1> {
uint8_t buff;
};
template <>
struct __align__(2) aligned_chunk<2> {
uint16_t buff;
};
template <>
struct __align__(4) aligned_chunk<4> {
uint32_t buff;
};
template <>
struct __align__(8) aligned_chunk<8> {
uint32_t buff[2];
};
template <>
struct __align__(16) aligned_chunk<16> {
uint32_t buff[4];
};
template <>
struct __align__(32) aligned_chunk<32> {
uint32_t buff[8];
};
template <>
struct __align__(64) aligned_chunk<64> {
uint32_t buff[16];
};
template <>
struct __align__(128) aligned_chunk<128> {
uint32_t buff[32];
};
template <>
struct __align__(256) aligned_chunk<256> {
uint32_t buff[64];
};
template <>
struct __align__(512) aligned_chunk<512> {
uint32_t buff[128];
};
template <>
struct __align__(1024) aligned_chunk<1024> {
uint32_t buff[256];
};
template <>
struct __align__(2048) aligned_chunk<2048> {
uint32_t buff[512];
};
template <>
struct __align__(4096) aligned_chunk<4096> {
uint32_t buff[1024];
};
/// std::aligned_storage
template <size_t Len, size_t Align>
struct aligned_storage {
typedef aligned_chunk<Align> type[Len / sizeof(aligned_chunk<Align>)];
};
#else
using std::aligned_storage;
#endif
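// Usage sketch (illustrative; the workspace pointer is an assumption about how the storage
// might be used): aligned_storage supplies raw, suitably aligned backing storage, e.g.
//
//   typename platform::aligned_storage<128, 16>::type buffer;   // 128 bytes, 16B-aligned
//   float *workspace = reinterpret_cast<float *>(&buffer);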
#if !defined(__CUDACC_RTC__)
/// Default deleter
template <typename T>
struct default_delete {
void operator()(T* ptr) const { delete ptr; }
};
/// Partial specialization for deleting array types
template <typename T>
struct default_delete<T[]> {
void operator()(T* ptr) const { delete[] ptr; }
};
/// std::unique_ptr
template <class T, class Deleter = default_delete<T> >
class unique_ptr {
public:
typedef T* pointer;
typedef T element_type;
typedef Deleter deleter_type;
private:
/// Pointer to memory
pointer _ptr;
/// Deleter
deleter_type _deleter;
public:
unique_ptr() : _ptr(nullptr) {}
unique_ptr(pointer p) : _ptr(p) {}
~unique_ptr() {
if (_ptr) {
_deleter(_ptr);
}
}
/// Returns a pointer to the managed object or nullptr if no object is owned.
pointer get() const noexcept { return _ptr; }
/// Releases ownership of the managed object, if any
pointer release() noexcept {
pointer p(_ptr);
_ptr = nullptr;
return p;
}
/// Replaces the managed object, deleting the old object.
void reset(pointer p = pointer()) noexcept {
pointer old_ptr = _ptr;
_ptr = p;
if (old_ptr != nullptr) {
get_deleter()(old_ptr);
}
}
/// Swaps the managed objects with *this and another unique_ptr
void swap(unique_ptr& other) noexcept { std::swap(_ptr, other._ptr); }
/// Returns the deleter object
Deleter& get_deleter() noexcept { return _deleter; }
/// Returns the deleter object
Deleter const& get_deleter() const noexcept { return _deleter; }
/// Checks whether an object is owned
operator bool() const noexcept { return _ptr != nullptr; }
/// Dereferences the unique_ptr
T& operator*() const { return *_ptr; }
/// Returns a pointer to the managed object
pointer operator->() const noexcept { return _ptr; }
/// Array access to managed object
T& operator[](size_t i) const { return _ptr[i]; }
};
/// Specializes the swap algorithm
template <typename T, typename Deleter>
void swap(unique_ptr<T, Deleter>& lhs, unique_ptr<T, Deleter>& rhs) noexcept {
lhs.swap(rhs);
}
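// Usage sketch (illustrative; values are arbitrary): this minimal unique_ptr covers
// host-side single-object ownership, e.g.
//
//   platform::unique_ptr<int> p(new int(42));
//   if (p) { *p = 7; }          // boolean test and dereference
//   int *raw = p.release();     // ownership transferred back to the caller
//   delete raw;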
#endif
/// std::numeric_limits
template <class T>
struct numeric_limits;
template <>
struct numeric_limits<int32_t> {
CUTLASS_HOST_DEVICE
static constexpr int32_t lowest() noexcept { return -2147483647 - 1;}
CUTLASS_HOST_DEVICE
static constexpr int32_t max() noexcept { return 2147483647;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<int16_t> {
CUTLASS_HOST_DEVICE
static constexpr int16_t lowest() noexcept { return -32768;}
CUTLASS_HOST_DEVICE
static constexpr int16_t max() noexcept { return 32767;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<int8_t> {
CUTLASS_HOST_DEVICE
static constexpr int8_t lowest() noexcept { return -128;}
CUTLASS_HOST_DEVICE
static constexpr int8_t max() noexcept { return 127;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<uint32_t> {
CUTLASS_HOST_DEVICE
static constexpr uint32_t lowest() noexcept { return 0;}
CUTLASS_HOST_DEVICE
static constexpr uint32_t max() noexcept { return 4294967295U;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<uint16_t> {
CUTLASS_HOST_DEVICE
static constexpr uint16_t lowest() noexcept { return 0;}
CUTLASS_HOST_DEVICE
static constexpr uint16_t max() noexcept { return 65535U;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<uint8_t> {
CUTLASS_HOST_DEVICE
static constexpr uint8_t lowest() noexcept { return 0;}
CUTLASS_HOST_DEVICE
static constexpr uint8_t max() noexcept { return 255U;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<float> {
CUTLASS_HOST_DEVICE
static constexpr float infinity() noexcept { return bit_cast<float, int32_t>(0x7f800000);}
static constexpr bool is_integer = false;
static constexpr bool has_infinity = true;
};
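// Usage sketch (illustrative): this trimmed-down numeric_limits is usable from device code
// where <limits> may be unavailable, e.g.
//
//   int32_t init = platform::numeric_limits<int32_t>::lowest();   // -2147483648
//   float inf    = platform::numeric_limits<float>::infinity();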
} // namespace platform
} // namespace cutlass
| 26,097 | C | 28.257848 | 127 | 0.579185 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element, ///< data type of element to be written
typename Layout ///< target shared memory layout
>
class TileIteratorTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_ ///< data type of element to be written
>
class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorLayout = Layout;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Number of times this iterator can be incremented
using TileIterations = typename Policy::TileIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerAccess>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / Policy::kElementsPerAccess) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
thread_offset_ = {
quad_id, lane_in_quad * Policy::kElementsPerAccess
};
pointer_ += layout_({thread_offset_.row(), thread_offset_.column() / Policy::kElementsPerAccess});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += layout_({
coord_offset.row(),
coord_offset.column() / Policy::kElementsPerAccess
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n];
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
frag_ptr[n] = pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & operator++() {
return add_tile_offset({1, 0});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
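// Usage sketch (illustrative; lane_id, the shared-memory sizing, and the fragment source are
// assumptions about the surrounding epilogue kernel): each warp stores one Fragment per
// iteration into padded shared memory, then advances by a whole row-tile.
//
//   using TileIterator =
//       TileIteratorTensorOp<WarpShape, OperatorShape, Element, layout::RowMajor>;
//   __shared__ Element smem[TileIterator::Shape::kRow *
//                           (TileIterator::Shape::kColumn + TileIterator::Padding::kColumn)];
//   TileIterator::TensorRef ref(smem,
//                               TileIterator::Shape::kColumn + TileIterator::Padding::kColumn);
//   TileIterator iterator(ref, lane_id);
//   for (int iter = 0; iter < TileIterator::kIterations; ++iter) {
//     typename TileIterator::Fragment frag;
//     // ... fill frag from the accumulator fragment iterator ...
//     iterator.store(frag);
//     ++iterator;   // advance one row-tile
//   }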
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_, ///< data type of element to be written
int InterleavedK ///< number of interleaved k
>
class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK> > {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = layout::ColumnMajorInterleaved<InterleavedK>;
using TensorLayout = Layout; ///< shared memory tensor ref layout
using TensorRef = TensorRef<Element, TensorLayout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
// Policy::kRowsPerIteration,
WarpShape::kM,
InterleavedK
>;
/// This is the fragment size produced by one tile
using Fragment = Array<
Element,
Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction
* Policy::kElementsPerIteration>;
/// This is the fragment size produced by one iteration
// using Fragment = Array<
// Element, Policy::kElementsPerIteration >;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
using TileIterations = typename Policy::TileIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerIteration>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
TensorLayout layout_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0]) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
thread_offset_ = {
quad_id, lane_in_quad * Policy::kElementsPerIteration
};
pointer_ += (layout_({thread_offset_.row(), thread_offset_.column()}) / Policy::kElementsPerAccess);
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += (layout_({
coord_offset.row(),
coord_offset.column()
}) / Policy::kElementsPerAccess);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) {
AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int a = 0; a < Policy::kAccessPerIteration; ++a) {
ptr[a + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n * Policy::kAccessPerIteration + a];
// printf("store thread %d, address %p, bank %ld\n", threadIdx.x, pointer_+a+n*Detail::kLanesInQuad,
// ((long long)(pointer_+a+n*Detail::kLanesInQuad)>>2)&0x1f);
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) {
AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int a = 0; a < Policy::kAccessPerIteration; ++a) {
frag_ptr[n * Policy::kAccessPerIteration + a] = ptr[a + pointer_offset / Policy::kElementsPerAccess];
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorTensorOp & operator++() {
return add_tile_offset({0, 1});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_, ///< data type of element to be written
typename Layout_
>
class TileIteratorTensorOpCanonical {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = Layout_;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
static int const kAccessSize = 1;
static int const kAccessCount = Policy::kElementsPerAccess / kAccessSize;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerAccess>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, kAccessSize>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Guard to indicate whether the shape is divisible
bool divisible_;
/// Extent of the output tensor
MatrixCoord extent_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0]),
divisible_(true),
extent_(WarpShape::kM, WarpShape::kN) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
thread_offset_ = {
quad_id, lane_in_quad * Policy::kElementsPerAccess
};
pointer_ += layout_({thread_offset_.row(), thread_offset_.column()});
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical(
TensorRef const &ref,
TensorCoord const &extent,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0]),
divisible_(false),
extent_(extent) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
thread_offset_ = {
quad_id, lane_in_quad * Policy::kElementsPerAccess
};
pointer_ += layout_({thread_offset_.row(), thread_offset_.column()});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += layout_({
coord_offset.row(),
coord_offset.column()
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int a = 0; a < kAccessCount; ++a) {
int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a;
int frag_idx = n * kAccessCount + a;
int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
pointer_[ptr_idx] = frag_ptr[frag_idx];
}
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int a = 0; a < kAccessCount; ++a) {
int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a;
int frag_idx = n * kAccessCount + a;
int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
frag_ptr[frag_idx] = pointer_[ptr_idx];
}
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorTensorOpCanonical & operator++() {
return add_tile_offset({1, 0});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
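// Note (illustrative; ref, frag, rows_remaining, columns_remaining, and lane_id are assumed
// from the surrounding kernel): unlike TileIteratorTensorOp, the canonical variant can be
// constructed with an explicit extent, and every scalar access is predicated against it, so
// partial tiles at the tensor boundary are handled safely, e.g.
//
//   TileIteratorTensorOpCanonical<WarpShape, OperatorShape, Element, Layout>
//       iterator(ref, MatrixCoord(rows_remaining, columns_remaining), lane_id);
//   iterator.store(frag);   // elements outside the extent are skipped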
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 20,290 | C | 29.19494 | 109 | 0.652785 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_simt.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/simt_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Fragment iterator for SIMT accumulator arrangements
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename Operator, ///< matrix multiply operation (concept: arch::Mma)
typename Layout, ///< target shared memory layout
typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class FragmentIteratorSimt;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename Operator_ , ///< matrix multiply operator (concept: arch::Mma)
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class FragmentIteratorSimt<WarpShape_, Operator_, layout::RowMajor, MmaSimtPolicy_> {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Layout = layout::RowMajor;
/// Policy for warp-level epilogue components
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
using OutputAccumulatorTile = AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
private:
/// Internal access type
using AccessType = Array<typename Operator::ElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorSimt(AccumulatorTile const &accum):
accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorSimt &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorSimt &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
int accumulator_access_offset = index_ * Policy::kAccessesPerIteration + n;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
};
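// Usage sketch (illustrative; accumulators and tile_iterator are assumed from the surrounding
// epilogue): the fragment iterator partitions the warp's accumulator tile so that each
// increment exposes one store-sized Fragment, typically handed to a warp-level tile iterator.
//
//   using FragmentIterator =
//       FragmentIteratorSimt<WarpShape, Operator, layout::RowMajor, MmaSimtPolicy>;
//   FragmentIterator frag_iterator(accumulators);
//   for (int iter = 0; iter < FragmentIterator::kIterations; ++iter, ++frag_iterator) {
//     typename FragmentIterator::Fragment frag;
//     frag_iterator.load(frag);
//     // ... tile_iterator.store(frag); ...
//   }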
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 5,880 | C | 34.642424 | 100 | 0.644728 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
///
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: Array)
typename Layout ///< target shared memory layout
>
class FragmentIteratorComplexTensorOp;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename OperatorShape_, ///< underlying real-valued matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC_, ///< underlying real-valued matrix multiply operation data type
typename OperatorFragmentC_ ///< underlying real-valued matrix multiply operation fragment (concept: Array)
>
class FragmentIteratorComplexTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
using Layout = layout::RowMajor;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
complex<OperatorElementC>,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
static int const kRealIndex = 0;
/// Offset into the accumulator fragment
static int const kImaginaryIndex =
OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<OperatorElementC, 2 * kImaginaryIndex>;
  /// This is the complete warp-level accumulator tile in its complex-valued output form.
using OutputAccumulatorTile = Array<complex<OperatorElementC>, kImaginaryIndex>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
private:
/// Internal access type
using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>;
using FragmentAccessType = Array<complex<OperatorElementC>, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorComplexTensorOp(AccumulatorTile const &accum):
accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorComplexTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorComplexTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int accumulator_access_offset =
index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess;
auto const & real_accum_array = accumulators_[accumulator_access_offset + kRealIndex];
auto const & imag_accum_array = accumulators_[accumulator_access_offset + kImaginaryIndex / Policy::kElementsPerAccess];
// Pack real and imaginary parts into a structure. This is likely to result in MOVs
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Policy::kElementsPerAccess; ++i) {
frag_ptr[n][i].real() = real_accum_array[i];
frag_ptr[n][i].imag() = imag_accum_array[i];
}
}
}
};
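// Note (illustrative; accumulators is assumed from the surrounding epilogue): the real-valued
// accumulators are stored planar -- all real parts first, then all imaginary parts starting
// at kImaginaryIndex -- and load() pairs them into complex<OperatorElementC> values, e.g.
//
//   using FragmentIterator = FragmentIteratorComplexTensorOp<
//       WarpShape, OperatorShape, ElementC, FragmentC, layout::RowMajor>;
//   FragmentIterator frag_iterator(accumulators);
//   typename FragmentIterator::Fragment frag;
//   frag_iterator.load(frag);   // frag[j] holds { real part, imaginary part } for this slice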
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 7,055 | C | 36.531915 | 126 | 0.672998 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
#include "cutlass/epilogue/warp/volta_tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape)
typename ElementC, ///< Accumulator layout
typename Layout ///< target shared memory layout
>
struct TileIteratorVoltaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape)
>
struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
using Element = half_t;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// Array type for aligned memory accesses
using AccessType = typename Policy::AccessType;
/// This is the fragment size produced by one access of the iterator.
using Fragment = typename Policy::Fragment;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Policy::AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Number of elements per access
static int const kElementsPerAccess = Policy::kElementsPerAccess;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
static int const kRowsPerQuad = 4;
static int const kColumnsPerQuad = 8;
static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess;
static int const kAccessQuadDelta = 16;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
TileIteratorVoltaTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / Policy::kElementsPerAccess) {
int quad_id = lane_id / Detail::kLanesInQuad;
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
int quad_row_idx = ((quad_id & 4) >> 1) + (quad_id & 1);
int quad_col_idx = ((quad_id & 2) >> 1);
int row = quad_row_idx * Detail::kRowsPerQuad + lane_in_quad;
int column = quad_col_idx * Detail::kColumnsPerQuad;
pointer_ += layout_({row, column / kElementsPerAccess});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) {
int access_quad = access_idx / 2;
int access = access_idx % 2;
int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess +
access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess +
access + pointer_offset / Policy::kElementsPerAccess;
int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx;
AccessType access_vector = frag_ptr[frag_idx];
pointer_[ptr_offset] = access_vector;
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
  void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) {
int access_quad = access_idx / 2;
int access = access_idx % 2;
        int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess +
          access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess +
          access + pointer_offset / Policy::kElementsPerAccess;
int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx;
frag_ptr[frag_idx] = pointer_[ptr_offset];
}
}
}
/// Load
CUTLASS_HOST_DEVICE
  void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape)
>
struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
using Element = float;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// Array type for aligned memory accesses
using AccessType = typename Policy::AccessType;
/// This is the fragment size produced by one access of the iterator.
using Fragment = typename Policy::Fragment;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Policy::AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Number of elements per access
static int const kElementsPerAccess = Policy::kElementsPerAccess;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
static int const kRowsPerQuad = 4;
static int const kColumnsPerQuad = 8;
static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess;
static int const kAccessQuadDelta = 16;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
TileIteratorVoltaTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / Policy::kElementsPerAccess) {
int quad_id = lane_id / Detail::kLanesInQuad;
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
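    // Map the lane's quad index to a (row, column) origin spaced by kQuadRowDelta rows
    // and kQuadColumnDelta accesses, then add the lane's own offset within the quad.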
int const kQuadRowDelta = 4;
int const kQuadColumnDelta = 2 * Policy::MmaIterations::kColumn;
int quad_row_offset = ((quad_id & 4) / 2 + (quad_id & 1)) * kQuadRowDelta;
int quad_column_offset = (quad_id & 2) / 2 * kQuadColumnDelta;
int thread_row_offset = (lane_in_quad & 1);
int thread_column_offset = (lane_in_quad & 2) / 2;
int row = quad_row_offset + thread_row_offset;
int column = quad_column_offset + thread_column_offset;
pointer_ += layout_({row, column});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
int const kAccessesPerRow = Policy::TileIterations::kColumn * Policy::MmaIterations::kColumn * 2;
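    // Each thread writes Policy::kRowsPerMmaTile rows; every row issues two accesses
    // per MMA column across all column tiles, which is what kAccessesPerRow counts.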
CUTLASS_PRAGMA_UNROLL
for (int row_idx = 0; row_idx < Policy::kRowsPerMmaTile; ++row_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < kAccessesPerRow; ++access_idx) {
int frag_idx = row_idx * kAccessesPerRow + access_idx;
int ptr_column_offset = (access_idx & 1) * 2 +
(access_idx & 2) * Policy::MmaIterations::kColumn * 2 +
(access_idx & 4) * Policy::MmaIterations::kColumn * 2;
int ptr_row_offset = row_idx * 2;
int ptr_offset = layout_({ptr_row_offset, ptr_column_offset}) + pointer_offset / Policy::kElementsPerAccess;
pointer_[ptr_offset] = frag_ptr[frag_idx];
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
  void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
    // Loading through this iterator specialization is not implemented.
    (void)frag;
    (void)pointer_offset;
    assert(0); // TODO
}
/// Load
CUTLASS_HOST_DEVICE
  void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 14,258 | C | 31.333333 | 116 | 0.655842 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/wmma_tensor_op_policy.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue.
These quantities assume a 'column-major' arrangement of TensorOp instructions, of which
a row-oriented slice is visible per iteration.
*/
#pragma once
#include "cutlass/arch/wmma.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
/// Policy details related to the epilogue
template <
  typename WarpShape,     ///< shape of warp-level GEMM (concept: gemm::GemmShape)
  typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Layout ///< target shared memory layout
>
struct WmmaTensorOpPolicy;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major
template <
  typename WarpShape,     ///< shape of warp-level GEMM (concept: gemm::GemmShape)
typename OperatorShape ///< matrix multiply operation shape (concept: gemm::GemmShape)
>
struct WmmaTensorOpPolicy<WarpShape, OperatorShape, layout::RowMajor> {
/// Number of operations
using OperatorCount = MatrixShape<
WarpShape::kM / OperatorShape::kM,
WarpShape::kN / OperatorShape::kN
>;
//
// Hard-coded constants regarding Tensor Operations
//
static int const kElementsPerAccess = 2;
static int const kRowsPerIteration = OperatorShape::kM;
static int const kWmmaFragmentsPerAccess = 1;
//
// Derived quantities
//
// Number of externally visible iterations
static int const kIterations = OperatorCount::kRow;
};
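// A minimal illustrative instantiation (not part of the library): a 64x64 warp tile
// built from 16x16x16 WMMA operations yields OperatorCount = MatrixShape<4, 4> and
// kIterations = 4.
//
//   using ExamplePolicy = WmmaTensorOpPolicy<
//     cutlass::gemm::GemmShape<64, 64, 16>,   // warp-level GEMM shape
//     cutlass::gemm::GemmShape<16, 16, 16>,   // WMMA operator shape
//     cutlass::layout::RowMajor>;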
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
#endif
| 3,916 | C | 37.40196 | 100 | 0.632022 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#if !(defined(__clang__) && defined(__CUDA__))
#include "cutlass/cutlass.h"
#include "cutlass/wmma_array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/warp/wmma_tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorFragment, ///< wmma fragment to be written (concept: nvcuda::wmma::fragment)
typename Layout ///< target shared memory layout
>
class TileIteratorWmmaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorFragment_ ///< wmma fragment to be written (concept: nvcuda::wmma::fragment)
>
class TileIteratorWmmaTensorOp<WarpShape_, OperatorShape_, OperatorFragment_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorFragment = OperatorFragment_;
using Layout = layout::RowMajor;
//
// Derived types
//
using WmmaDataType = typename OperatorFragment::element_type;
  using Element = typename cutlass::arch::WmmaToCutlassDataType<WmmaDataType>::Type;        ///< Data type of element stored in nvcuda::wmma::fragment
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = WmmaTensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = WmmaFragmentArray<OperatorFragment, Policy::OperatorCount::kColumn * Policy::kWmmaFragmentsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Padding quantity
  // (Epilogue shared memory padding for the WMMA GEMM kernel is set to run optimally on Turing)
using Padding = MatrixShape<
0,
4 * Policy::kElementsPerAccess
>;
private:
/// Storage type for accessing memory
//using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to shared memory
TensorRef ref_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp(): ref_(nullptr) {
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp(
TensorRef const &ref,
unsigned lane_id
): ref_(ref) {
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp & add_pointer_offset(Index pointer_offset) {
ref_.add_pointer_offset(pointer_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset({tile_offset.row() * OperatorShape::kM, tile_offset.column() * WarpShape::kN});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorWmmaTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
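    // Each fragment element is a complete nvcuda::wmma accumulator fragment. Write one
    // per column of WMMA operations, offset by n * OperatorShape::kN columns, using
    // store_matrix_sync with the row stride of the shared-memory tile.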
for(int n=0; n < Policy::OperatorCount::kColumn; n++) {
WmmaDataType* ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({0, n * OperatorShape::kN}) + pointer_offset);
nvcuda::wmma::store_matrix_sync(
ptr,
frag[n],
ref_.stride()[0],
nvcuda::wmma::layout_t::mem_row_major
);
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
for(int n=0; n < Policy::OperatorCount::kColumn; n++) {
WmmaDataType* ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({0, n * OperatorShape::kN}) + pointer_offset);
nvcuda::wmma::load_matrix_sync(
frag[n],
ptr,
ref_.stride()[0],
nvcuda::wmma::layout_t::mem_row_major
);
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif // !(defined(__clang__) && defined(__CUDA__))
| 7,704 | C | 32.79386 | 151 | 0.635254 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_simt.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/simt_policy.h"
#define CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES 1
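// When set to 1, the SIMT epilogue writes accumulators to shared memory one element at
// a time instead of using vector stores; the extra '+ 1' column of padding below is
// only added in this mode.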
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename Operator, ///< matrix multiply operation (concept: arch::Mma)
typename Element, ///< data type of element to be written
typename Layout, ///< target shared memory layout
typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimt;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimt<WarpShape_, Operator_, Element_, layout::RowMajor, MmaSimtPolicy_> {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<
0,
4 * Policy::kElementsPerAccess
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
+ 1
#endif
>;
private:
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
1
>;
#else
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
Policy::kElementsPerAccess
>;
#endif
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimt(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimt(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
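    // lane_layout.inverse(lane_id) recovers this lane's (row, column) coordinates in the
    // warp's lane arrangement; offset the pointer to the lane's first access.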
pointer_ += layout_({
lane_offset.row(),
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimt & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimt & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
(tile_offset.column() * Shape::kColumn / int(AccessType::kElements))
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimt & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
#if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES
// de-vectorized stores
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType const *scalarFragPtr = reinterpret_cast<ScalarAccessType const *>(&frag);
ScalarAccessType *scalarPointer = reinterpret_cast<ScalarAccessType *>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
scalarPointer[n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s] = scalarFragPtr[n * Policy::kElementsPerAccess + s];
}
}
#else
// original vector stores
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)] = frag_ptr[n];
}
#endif
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
frag_ptr[n] = pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtDirectConv {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<Policy::kRowsPerIteration, WarpShape::kN>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<typename Operator::ElementC, Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<typename Operator::ElementC, Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<0,
0
>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
Policy::kElementsPerAccess
>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Base smem offset;
Index base_smem_address_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv() : pointer_(nullptr) {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
pointer_ += layout_({
lane_offset.row(),
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
(tile_offset.column() * Shape::kColumn / int(AccessType::kElements))
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirectConv & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
// original vector stores
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType * load_pointer_ = reinterpret_cast<AccessType *>(reinterpret_cast<uint8_t *>(pointer_) + base_smem_address_);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
load_pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)] = frag_ptr[n];
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
frag_ptr[n] = pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address){
base_smem_address_ = address;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename ThreadOutputShape_, /// Size of the matrix to load (concept: TensorNHWC)
typename ThreadBlockOutputShape_, /// Size of the matrix to load (concept: TensorNHWC)
          typename Operator_,     ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtDirect2dConv {
public:
using WarpShape = WarpShape_;
using ThreadOutputShape = ThreadOutputShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = layout::RowMajor;
using MmaSimtPolicy = MmaSimtPolicy_;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
// Thread-level shape of a fragment
using ThreadShape = MatrixShape<ThreadOutputShape::kNHW, ThreadOutputShape::kC>;
static_assert(!(ThreadShape::kColumn % MmaSimtPolicy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
using ThreadTileCount = MatrixShape<ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW>;
using Iterations =
MatrixShape<ThreadShape::kRow, ThreadShape::kColumn / MmaSimtPolicy::LaneMmaShape::kN>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Operator::FragmentC;
/// This is the fragment size produced by one access of the iterator.
using Fragment = AccumulatorTile;
/// Padding quantity
using Padding = MatrixShape<0, 0>;
private:
// Storage type for accessing memory
using AccessType = AlignedArray<Element, MmaSimtPolicy::LaneMmaShape::kN>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Base smem offset;
Index base_smem_address_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv() : pointer_(nullptr) {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv(TensorRef const &ref, unsigned thread_id, unsigned lane_id)
: pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements) {
auto lane_layout = MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
// Get base HW offset of current threads
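    // Threads are grouped along the channel dimension: each group of
    // (ThreadBlockOutputShape::kC / ThreadOutputShape::kC) threads covers one
    // ThreadOutputShape::kH x ThreadOutputShape::kW spatial tile, with tiles laid out
    // W-fastest across the threadblock output.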
const int threadgroup = thread_id / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
const int base_p = (threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH;
const int base_q = (threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW;
const int row_offset = base_p * ThreadBlockOutputShape::kW + base_q;
pointer_ += layout_(
{row_offset,
lane_offset.column() * MmaSimtPolicy::LaneMmaShape::kN / int(AccessType::kElements)});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtDirect2dConv &add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType *storer_pointer_ =
reinterpret_cast<AccessType *>(reinterpret_cast<uint8_t *>(pointer_) + base_smem_address_);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int h = 0; h < ThreadOutputShape::kH; ++h) {
CUTLASS_PRAGMA_UNROLL
for (int w = 0; w < ThreadOutputShape::kW; ++w) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int offset = (w + h * ThreadBlockOutputShape::kW) *
(ThreadBlockOutputShape::kC / AccessType::kElements) +
col;
storer_pointer_[offset + pointer_offset / int(AccessType::kElements)] =
frag_ptr[w + h * ThreadOutputShape::kW + col];
}
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) { base_smem_address_ = address; }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename Operator_, ///< matrix multiply operation (concept: arch::Mma)
typename Element_, ///< data type of element to be written
typename Layout_, ///< target shared memory layout
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class TileIteratorSimtCanonical {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Element = Element_;
using Layout = Layout_;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Padding quantity
using Padding = MatrixShape<
0,
4 * Policy::kElementsPerAccess + 1
>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<
Element,
1
>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
/// Guard to indicate whether the shape is divisible
bool divisible_;
/// Extent of the output tensor
MatrixCoord extent_;
/// Thread offset
MatrixCoord thread_offset_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements),
divisible_(true),
extent_(WarpShape::kM, WarpShape::kN) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
thread_offset_ = {
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess
};
pointer_ += layout_({
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical(
TensorRef const &ref,
TensorCoord const &extent,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / AccessType::kElements),
divisible_(false),
extent_(extent) {
auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id);
thread_offset_ = {
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess
};
pointer_ += layout_({
lane_offset.row() * Shape::kRow,
lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements)
});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / AccessType::kElements;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & add_tile_offset(TensorCoord const &tile_offset) {
MatrixCoord coord_offset(
tile_offset.row(),
tile_offset.column() * Shape::kColumn
);
thread_offset_ += coord_offset;
pointer_ += layout_({
coord_offset.row(),
coord_offset.column()
});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
// de-vectorized stores
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType const *scalarFragPtr = reinterpret_cast<ScalarAccessType const *>(&frag);
ScalarAccessType *scalarPointer = reinterpret_cast<ScalarAccessType *>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
int ptr_idx = n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s;
int frag_idx = n * Policy::kElementsPerAccess + s;
int col = thread_offset_.column() + ptr_idx;
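        // Predicate the store on the output extent when the warp tile is not guaranteed
        // to be divisible (i.e. partial tiles at the boundary of the output tensor).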
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
scalarPointer[ptr_idx] = scalarFragPtr[frag_idx];
}
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
// de-vectorized loads
using ScalarAccessType = AlignedArray<Element, 1>;
ScalarAccessType *scalarFragPtr = reinterpret_cast<ScalarAccessType *>(&frag);
ScalarAccessType const *scalarPointer = reinterpret_cast<ScalarAccessType const*>(pointer_) + pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::kElementsPerAccess; s++) {
int ptr_idx = n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s;
int frag_idx = n * Policy::kElementsPerAccess + s;
int col = thread_offset_.column() + ptr_idx;
if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) {
scalarFragPtr[frag_idx] = scalarPointer[ptr_idx];
}
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
CUTLASS_HOST_DEVICE
TileIteratorSimtCanonical & operator++() {
return add_tile_offset({1, 0});
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 25,658 | C | 31.645038 | 156 | 0.653519 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
// This is an optimization available on CUDA 11.2 and beyond that eliminates branches in the epilogue.
#define CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED ((__CUDACC_VER_MAJOR__ * 10 + __CUDACC_VER_MINOR__) >= 112)
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory. This is optimized
/// for mixed-precision epilogues in which the accumulators are 32b in width, but the output
/// data type is smaller.
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename Element_, ///< data type of accumulator element
int ElementSizeBits, ///< Size of accumulator element in bits
int OutputSizeBits, ///< Size of output element in bits
int OutputElementCount, ///< number of elements in output vector
int ContiguousLanes ///< Number of consecutive lanes writing to contiguous memory
>
class TileIteratorTensorOpMixed {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kOutputElementCount = OutputElementCount;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount =
(OutputElementCount * sizeof_bits<Element>::value) / (const_min(128, OutputElementCount * sizeof_bits<Element>::value));
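    // e.g. OutputElementCount = 8 with 32b accumulators gives 256 / min(128, 256) = 2 pointers.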
static_assert(kPointerCount <= 4, "Can only accommodate four pointers at present.");
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<
0,
Detail::kLanesInQuad * Policy::kElementsPerAccess>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount];
/// Stride in units of AccessType
int stride_;
/// Logical column in which warp tile is aligned
int warp_column_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed() {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] = nullptr;
}
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / Policy::kElementsPerAccess),
warp_column_(0) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
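      // Each pointer starts at a different even pair of AccessType columns within the
      // quad; the (lane_in_quad / 2) term rotates which pointer a given lane begins with.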
int column_idx = (lane_in_quad % 2) + (((lane_in_quad / 2) + i) % Detail::kPointerCount) * 2;
ptr += column_idx;
if (i == 0) {
pointers_[0 % Detail::kPointerCount] = ptr;
}
else if (i == 1) {
pointers_[1 % Detail::kPointerCount] = ptr;
}
else if (i == 2) {
pointers_[2 % Detail::kPointerCount] = ptr;
}
else if (i == 3) {
pointers_[3 % Detail::kPointerCount] = ptr;
}
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / Policy::kElementsPerAccess;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess;
}
warp_column_ += tile_offset.column() * Shape::kColumn;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType *ptr = pointers_[0];
#if CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
// When the optimization is enabled, small tiles require separate logic.
bool kN32_optimization = (WarpShape::kN * Detail::kLanesInQuad * Policy::kElementsPerAccess * sizeof_bits<Element>::value) % 1024 == 0;
if (kN32_optimization) {
int ptr_idx = ((warp_column_ * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
if (ptr_idx == 0) {
ptr = pointers_[0];
} else if (ptr_idx == 1) {
ptr = pointers_[1];
} else if (ptr_idx == 2) {
ptr = pointers_[2];
} else if (ptr_idx == 3) {
ptr = pointers_[3];
}
}
#endif
CUTLASS_PRAGMA_UNROLL
for (int64_t n = 0; n < Policy::OperatorCount::kColumn; ++n) {
#if CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
//
// When the optimization is enabled, this expression suffices to obtain the SMEM pointer.
//
if (WarpShape::kN == 64) {
ptr = pointers_[n / 4];
}
else if (!kN32_optimization)
#endif
{
// This is the reference implementation
int column_idx = warp_column_ + n * Detail::kLanesInQuad * Policy::kElementsPerAccess;
int ptr_idx = ((column_idx * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
if (ptr_idx == 0) {
ptr = pointers_[0 % Detail::kPointerCount];
}
else if (ptr_idx == 1) {
ptr = pointers_[1 % Detail::kPointerCount];
}
else if (ptr_idx == 2) {
ptr = pointers_[2 % Detail::kPointerCount];
}
else if (ptr_idx == 3) {
ptr = pointers_[3 % Detail::kPointerCount];
}
}
int offset = n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess;
ptr[offset] = frag_ptr[n];
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int64_t n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int column_idx = warp_column_ + n * Detail::kLanesInQuad * Policy::kElementsPerAccess;
int ptr_idx = ((column_idx * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount;
AccessType const *smem_ptr = pointers_[ptr_idx];
frag_ptr[n] = smem_ptr[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess];
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for int32_t x 16 => int8_t/int4b_t x 16
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape),
int OutputSizeBits ///< Size of output element in bits
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, int32_t, 32, OutputSizeBits, 16, 8> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = int32_t;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 16;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
/// Offsets added
static int const kOffsetCount = 4;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount];
/// Stride in units of AccessType
int stride_;
/// Uniform offset in bytes added to warp tile iterator
int uniform_offset_[Detail::kOffsetCount];
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed() {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] = nullptr;
}
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
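      // The XOR with (i * 2) gives the two pointers interleaved column positions within
      // the quad.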
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ 0) * 4 * sizeof(AccessType);
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kOffsetCount; ++i) {
uniform_offset_[i] = (i ^ tile_offset.column()) * 4 * sizeof(AccessType);
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
int offset_idx = (n % 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType) + uniform_offset_[offset_idx];
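      // Add the byte offset to the shared memory address and store both 32b words of
      // the AccessType with a single st.shared.v2.u32 instruction.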
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for int32_t x 8 => int8_t/int4b_t x 8
template <
typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape)
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
int OutputSizeBits ///< Size of output element in bits
>
class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, int32_t, 32, OutputSizeBits, 8, 8> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using Element = int32_t;
using Layout = layout::RowMajor;
static int const kOutputElementCount = 8;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
Element,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
//using AccumulatorTile = typename Operator::FragmentC;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
/// Number of pointers needed to write accumulators
static int const kPointerCount = 2;
static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32).");
};
/// Padding quantity
using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>;
private:
/// Storage type for accessing memory
using AccessType = AlignedArray<Element, 2>;
//
// Data members
//
/// Internal pointer to memory
AccessType *pointers_[Detail::kPointerCount];
/// Stride in units of AccessType
int stride_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed() {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] = nullptr;
}
}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed(
TensorRef const &ref,
unsigned lane_id
):
stride_(ref.stride()[0] / AccessType::kElements) {
int quad_id = (lane_id / Detail::kLanesInQuad);
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_;
int column_idx = lane_in_quad ^ (i * 2);
ptr += column_idx;
if (i == 0) {
pointers_[0] = ptr;
}
else if (i == 1) {
pointers_[1] = ptr;
}
}
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int64_t i = 0; i < Detail::kPointerCount; ++i) {
pointers_[i] += pointer_offset / AccessType::kElements;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) {
int ptr_offset = tile_offset.row() * Shape::kRow * stride_ +
tile_offset.column() * Shape::kColumn / AccessType::kElements;
pointers_[0] += ptr_offset;
pointers_[1] += ptr_offset;
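    // The two pointers were constructed with an XOR-based column swizzle; advancing by an odd
    // number of column tiles flips that pattern, so the pointers are swapped to keep stores
    // mapped to the intended shared memory locations.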
if (tile_offset.column() % 2) {
auto tmp = pointers_[0];
pointers_[0] = pointers_[1];
pointers_[1] = tmp;
}
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) {
return add_tile_offset(tile_offset);
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int ptr_idx = (n / 4);
AccessType *ptr;
if (ptr_idx == 0) {
ptr = pointers_[0];
}
else if (ptr_idx == 1) {
ptr = pointers_[1];
}
int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements + (n % 4) * 4;
#if 0
//
// Using inline PTX to avoid generic memory
//
AccessType *smem_ptr = pointers_[ptr_idx];
smem_ptr[offset] = frag_ptr[n];
#else
uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr);
uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n);
uint32_t offset_in_bytes = offset * sizeof(AccessType);
asm volatile(
"{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n"
: : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1])
);
#endif
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#undef CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED
/////////////////////////////////////////////////////////////////////////////////////////////////
| 22,857 | C | 30.398352 | 140 | 0.626241 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
///
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: Array)
typename Layout ///< target shared memory layout
>
class FragmentIteratorTensorOp;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC_, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC_ ///< matrix multiply operation fragment (concept: Array)
>
class FragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
using Layout = layout::RowMajor;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
OperatorElementC,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
OperatorElementC,
OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn>;
using OutputAccumulatorTile = AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
using TileIterations = typename Policy::TileIterations;
static int const kIterationsPerTile = kIterations / TileIterations::kCount;
private:
/// Internal access type
using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp(AccumulatorTile const &accum):
accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int accumulator_access_offset =
index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
};
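// Illustrative sketch (not part of the library): an epilogue typically visits the accumulator tile
// one fragment at a time and hands each fragment to a warp-level tile iterator. Assuming
// 'FragmentIterator' is a concrete instantiation of the class above and 'accum_fragment_iterator'
// was constructed from the warp's accumulator tile:
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
//     typename FragmentIterator::Fragment frag;
//     accum_fragment_iterator.load(frag);   // read one fragment of the accumulator tile
//     ++accum_fragment_iterator;            // advance to the next fragment
//     // ... store 'frag' to shared memory through a warp-level tile iterator ...
//   }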
////////////////////////////////////////////////////////////////////////////////
/// Dedicated to interleaved layout
template <
/// shape of the warp-level GEMM tile
typename WarpShape_,
/// matrix multiply operator shape (concept: gemm::GemmShape)
typename OperatorShape_,
/// matrix multiply operator data type (concept: data type)
typename OperatorElementC_,
/// matrix multiply operator fragment (concept: Array)
typename OperatorFragmentC_,
/// number of interleaved k
int InterleavedK>
class FragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_,
layout::ColumnMajorInterleaved<InterleavedK>> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment =
Array<OperatorElementC,
Policy::kElementsPerAccess * InterleavedK / OperatorShape::kN>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile =
Array<OperatorElementC, OperatorFragmentC::kElements *
Policy::OperatorCount::kRow *
Policy::OperatorCount::kColumn>;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
using TileIterations = typename Policy::TileIterations;
static int const kIterationsPerTile = kIterations / TileIterations::kCount;
private:
/// Internal access type
using AccessType =
Array<OperatorElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp(AccumulatorTile const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < (InterleavedK / OperatorShape::kN); ++n) {
int index_m = index % (Policy::OperatorCount::kRow *
Policy::kIterationsPerInstruction);
int index_n = index / (Policy::OperatorCount::kRow *
Policy::kIterationsPerInstruction);
int accumulator_access_offset =
(index_m / Policy::kIterationsPerInstruction) *
(Policy::OperatorCount::kColumn *
Policy::kIterationsPerInstruction) +
(index_m % Policy::kIterationsPerInstruction) +
index_n * (InterleavedK / OperatorShape::kN) *
Policy::kIterationsPerInstruction +
n * Policy::kIterationsPerInstruction;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 9,883 | C | 34.553957 | 117 | 0.663058 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_relu.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination with a maximum operation used by epilogues.
*/
#pragma once
#include <cutlass/half.h>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Single source of truth for whether `LinearCombinationRelu` is considered a heavy operation
constexpr bool LinearCombinationReluIsHeavy() {
return false;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
                                       ///< but 64 or 32 are sometimes used when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationRelu {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentScaleBias = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute threshold; ///< minimum value that is output
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta = ElementCompute(0),
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr = nullptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementCompute threshold_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementCompute const &>(allones);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source and accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes per-channel linear scaling and bias : D = scale * accumulator + bias
/// Scale and Bias are from input Fragment
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentScaleBias const &scale,
FragmentScaleBias const &bias) const {
    // Convert accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform per-channel scale and bias
FragmentCompute intermediate;
multiply_add<FragmentCompute> mul_add_accumulator;
if(Scale == ScaleType::OnlyAlphaPerChannelScaling)
intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias
else
intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias
ReLu<FragmentCompute> relu;
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
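// Illustrative sketch (not part of the library): instantiating this functor for a half-precision
// output with single-precision accumulation. The element types and the vector width of 8 are
// assumptions chosen for demonstration.
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombinationRelu<
//       cutlass::half_t,   // ElementOutput
//       8,                 // Count  (128 / sizeof_bits<half_t>)
//       float,             // ElementAccumulator
//       float>;            // ElementCompute
//
//   OutputOp::Params params(1.0f /*alpha*/, 1.0f /*beta*/);
//   OutputOp output_op(params);
//   // Inside the epilogue: OutputOp::FragmentOutput d = output_op(accum_fragment, source_fragment);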
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
/// Special handling for int types
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
ScaleType::Kind Scale, ///< Control Alpha and Beta scaling
FloatRoundStyle Round
>
class LinearCombinationRelu <ElementOutput_, Count, int, float, Scale, Round> {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy();
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentScaleBias = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute threshold; ///< minimum value that is output
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta = ElementCompute(0),
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr = nullptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementCompute threshold_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementCompute const &>(allones);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source and accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
/// Computes per-channel linear scaling and bias : D = scale * accumulator + bias
/// Scale and Bias are from input Fragment
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentScaleBias const &scale,
FragmentScaleBias const &bias) const {
    // Convert accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform per-channel scale and bias
FragmentCompute intermediate;
multiply_add<FragmentCompute> mul_add_accumulator;
if(Scale == ScaleType::OnlyAlphaPerChannelScaling)
intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias
else
intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias
ReLu<FragmentCompute> relu;
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
};
#endif // Conditional guards to enable partial specialization for packed integers
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 20,486 | C | 34.879159 | 150 | 0.673191 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_residual_block.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue functor specialized for residual blocks in deep neural networks.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
namespace detail {
/// Dummy class used to designate that the second binary operator in the epilogue is unused
template <typename T>
class NoOp {};
}
/// Models a residual block of the form: UnaryOp(BinaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual1), residual2))
template <typename ElementOutput_, typename ElementAccumulator_,
typename ElementCompute_, typename ElementC_, int ElementsPerAccess,
template <typename T> class ActivationOp_,
template <typename T> class BinaryOp1_,
template <typename T> class UnaryOp_,
template <typename T> class BinaryOp2_ = detail::NoOp>
class LinearCombinationResidualBlock {
public:
static bool const kIsSingleSource = false;
using ElementOutput = ElementC_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
using UnaryOp = UnaryOp_<Array<ElementCompute, kCount>>;
using BinaryOp1 = BinaryOp1_<Array<ElementCompute, kCount>>;
using BinaryOp2 = BinaryOp2_<Array<ElementCompute, kCount>>;
using ActivationOp = ActivationOp_<Array<ElementCompute, kCount>>;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementC, kElementsPerAccess>;
using FragmentOutput = Array<ElementOutput, kElementsPerAccess>;
using ElementZ = ElementOutput_;
using ElementT = ElementZ;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
static bool const kIsHeavy = true;
static bool const kStoreZ = true;
static bool const kStoreT = false;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales residual input
ElementCompute const *alpha_ptr{nullptr}; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr{nullptr}; ///< pointer to residual scalar - if not null, loads it from memory
CUTLASS_HOST_DEVICE
Params() : alpha(ElementCompute(1)), beta(ElementCompute(1)) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha, ElementCompute beta)
: alpha(alpha), beta(beta) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr)
: alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {}
};
private:
ElementCompute alpha_;
ElementCompute beta_;
bool skip_elementwise_;
public:
/// Constructor from Params
CUTLASS_HOST_DEVICE
LinearCombinationResidualBlock(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
skip_elementwise_ = false;
}
/// The "source" tensor corresponds to the residual input
CUTLASS_HOST_DEVICE
bool is_source_needed() const { return true; }
/// Functionally required for serial reduction in the epilogue
/// IMPORTANT: Split-k is supported only when ActivationOp is Identity.
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
skip_elementwise_ = true;
}
}
/// Applies the operation UnaryOp(BinaryOp(BinaryOp(ActivationOp(AB + bias), residual1), residual2))
CUTLASS_HOST_DEVICE
void operator()(FragmentOutput &frag_Z, FragmentOutput &, FragmentAccumulator const &AB,
FragmentC const &residual1, FragmentC const &residual2,
FragmentCompute const &bias) const {
UnaryOp unary_op;
BinaryOp1 binary_op1;
BinaryOp2 binary_op2;
ActivationOp activation;
FragmentCompute tmp_Accum =
NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute tmp_residual1 =
NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual1);
FragmentCompute tmp_residual2 =
NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual2);
FragmentCompute z =
binary_op2(binary_op1(activation(alpha_ * tmp_Accum + bias), beta_ * tmp_residual1), beta_ * tmp_residual2);
FragmentCompute result_Z = skip_elementwise_ ? z : unary_op(z);
NumericArrayConverter<ElementOutput, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
}
/// Should never be called
CUTLASS_HOST_DEVICE
void operator()(FragmentOutput &, FragmentOutput &, FragmentAccumulator const &,
FragmentCompute const &) const {}
};
/// Models a residual block of the form: UnaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual))
template <typename ElementOutput_, typename ElementAccumulator_,
typename ElementCompute_, typename ElementC_, int ElementsPerAccess,
template <typename T> class ActivationOp_,
template <typename T> class BinaryOp1_,
template <typename T> class UnaryOp_>
class LinearCombinationResidualBlock<ElementOutput_, ElementAccumulator_,
ElementCompute_, ElementC_, ElementsPerAccess,
ActivationOp_, BinaryOp1_, UnaryOp_,
detail::NoOp> {
public:
static bool const kIsSingleSource = true;
using ElementOutput = ElementC_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
using UnaryOp = UnaryOp_<Array<ElementCompute, kCount>>;
using BinaryOp = BinaryOp1_<Array<ElementCompute, kCount>>;
using ActivationOp = ActivationOp_<Array<ElementCompute, kCount>>;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementC, kElementsPerAccess>;
using FragmentOutput = Array<ElementOutput, kElementsPerAccess>;
using ElementZ = ElementOutput_;
using ElementT = ElementZ;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
static bool const kIsHeavy = true;
static bool const kStoreZ = true;
static bool const kStoreT = false;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales residual input
ElementCompute const *alpha_ptr{nullptr}; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr{nullptr}; ///< pointer to residual scalar - if not null, loads it from memory
CUTLASS_HOST_DEVICE
Params() : alpha(ElementCompute(1)), beta(ElementCompute(1)) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha, ElementCompute beta)
: alpha(alpha), beta(beta) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr)
: alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {}
};
private:
ElementCompute alpha_;
ElementCompute beta_;
bool skip_elementwise_;
public:
/// Constructor from Params
CUTLASS_HOST_DEVICE
LinearCombinationResidualBlock(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
skip_elementwise_ = false;
}
/// The "source" tensor corresponds to the residual input
CUTLASS_HOST_DEVICE
bool is_source_needed() const { return true; }
/// Functionally required for serial reduction in the epilogue
/// IMPORTANT: Split-k is supported only when ActivationOp is Identity.
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
skip_elementwise_ = true;
}
}
/// Applies the operation UnaryOp(BinaryOp(ActivationOp(AB + bias), residual))
CUTLASS_HOST_DEVICE
void operator()(FragmentOutput &frag_Z, FragmentOutput &, FragmentAccumulator const &AB,
FragmentC const &residual,
FragmentCompute const &bias) const {
UnaryOp unary_op;
BinaryOp binary_op;
ActivationOp activation;
FragmentCompute tmp_Accum =
NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute tmp_residual =
NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual);
FragmentCompute z =
binary_op(activation(alpha_ * tmp_Accum + bias), beta_ * tmp_residual);
FragmentCompute result_Z = skip_elementwise_ ? z : unary_op(z);
NumericArrayConverter<ElementOutput, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
}
/// Should never be called
CUTLASS_HOST_DEVICE
void operator()(FragmentOutput &, FragmentOutput &, FragmentAccumulator const &,
FragmentCompute const &) const {}
};
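// Illustrative sketch (not part of the library): a single-source residual block of the form
// Identity(plus(ReLu(alpha * AB + bias), beta * residual)). The element types, vector width, and
// functor choices are assumptions chosen for demonstration.
//
//   using ResidualOp = cutlass::epilogue::thread::LinearCombinationResidualBlock<
//       cutlass::half_t,                        // ElementOutput_ (ElementZ)
//       float,                                  // ElementAccumulator_
//       float,                                  // ElementCompute_
//       cutlass::half_t,                        // ElementC_
//       8,                                      // ElementsPerAccess
//       cutlass::epilogue::thread::ReLu,        // ActivationOp_
//       cutlass::plus,                          // BinaryOp1_
//       cutlass::epilogue::thread::Identity>;   // UnaryOp_ (BinaryOp2_ defaults to NoOp)
//
//   ResidualOp::Params params(1.0f /*alpha*/, 1.0f /*beta*/);
//   ResidualOp op(params);
//   // op(frag_Z, frag_T, accum_fragment, residual_fragment, bias_fragment);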
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,855 | C | 38.128713 | 124 | 0.690257 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_bias_elementwise.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <
typename ElementC_,
typename ElementAccumulator_,
typename ElementCompute_,
typename ElementZ_,
typename ElementT_,
int ElementsPerAccess,
typename ElementwiseOp_ = Identity<ElementCompute_>,
typename BinaryOp_ = plus<ElementCompute_>
>
class LinearCombinationBiasElementwise {
public:
using ElementOutput = ElementC_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementZ = ElementZ_;
using ElementT = ElementT_;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
using ElementwiseOp = ElementwiseOp_;
using BinaryOp = BinaryOp_;
// Indicates that this epilogue applies only one binary operation
static bool const kIsSingleSource = true;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementOutput, kElementsPerAccess>;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
using FragmentOutput = FragmentZ;
static bool const kIsHeavy = ElementwiseOp::kIsHeavy;
/// If true, the 'Z' tensor is stored
static bool const kStoreZ = true;
/// If true, the 'T' tensor is stored
static bool const kStoreT = true;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
bool skip_elementwise_;
public:
//
// Methods
//
/// Constructor from Params
CUTLASS_HOST_DEVICE
LinearCombinationBiasElementwise(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
skip_elementwise_ = false;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
skip_elementwise_ = true;
}
}
/// Applies the operation when is_source_needed() is true
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C,
FragmentCompute const &V) const {
ElementwiseOp elementwise_op;
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);
FragmentCompute result_Z;
FragmentCompute result_T;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i] + beta_ * tmp_C[i], V[i]);
result_T[i] = z;
result_Z[i] = skip_elementwise_ ? z : elementwise_op(z);
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t;
frag_T = convert_t(result_T);
}
/// Applies the operation when is_source_needed() is false
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V) const {
ElementwiseOp elementwise_op;
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute result_Z;
FragmentCompute result_T;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]);
result_T[i] = z;
result_Z[i] = skip_elementwise_ ? z : elementwise_op(z);
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t;
frag_T = convert_t(result_T);
}
};
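// Illustrative sketch (not part of the library): computing Z = GELU(alpha * AB + beta * C + V) while
// also storing the pre-activation tensor T. The element types, vector width, and the choice of GELU
// as the elementwise op are assumptions chosen for demonstration.
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
//       cutlass::half_t,                            // ElementC
//       float,                                      // ElementAccumulator
//       float,                                      // ElementCompute
//       cutlass::half_t,                            // ElementZ
//       cutlass::half_t,                            // ElementT
//       8,                                          // ElementsPerAccess
//       cutlass::epilogue::thread::GELU<float>>;    // ElementwiseOp (BinaryOp defaults to plus)
//
//   OutputOp::Params params(1.0f /*alpha*/, 1.0f /*beta*/);
//   OutputOp output_op(params);
//   // output_op(frag_Z, frag_T, accum_fragment, frag_C, broadcast_fragment_V);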
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,344 | C | 30.97318 | 116 | 0.657958 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_generic.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator followed by an activation function to an array of elements.
///
/// D = activation(alpha * accumulator + beta * source + uniform)
///
template <
template<typename T> class ActivationFunctor,
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
                                       ///< but 64 or 32 are sometimes used when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
bool IsHeavy = false
>
class LinearCombinationGeneric {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static bool const kIsHeavy = IsHeavy;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
using Params = typename ActivationFunctor<FragmentCompute>::Params;
private:
//
// Data members
//
Params params_;
bool skip_elementwise_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationGeneric(Params const ¶ms) {
params_ = params;
params_.alpha = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
params_.beta = (params.beta_ptr ? *params.beta_ptr : params.beta);
skip_elementwise_ = false;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return params_.beta != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
params_.beta = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
skip_elementwise_ = true;
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source and accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
ActivationFunctor<FragmentCompute> activation;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(params_.alpha, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(params_.beta, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(params_.alpha, converted_accumulator, intermediate); // D = alpha * Accum + X
}
intermediate = skip_elementwise_ ? intermediate : activation(intermediate, params_);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to the internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_accumulator;
ActivationFunctor<FragmentCompute> activation;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_accumulator(params_.alpha, converted_accumulator); // D = alpha * Accum
}
intermediate = skip_elementwise_ ? intermediate : activation(intermediate, params_);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
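// Illustrative note (not part of the original CUTLASS sources): under serial split-K reduction,
// each partition's epilogue constructs its own functor and calls set_k_partition() once, so
// partial results accumulate into C and the activation fires only on the last partition.
// A minimal trace, assuming k_partition_count == 3:
//
//   op.set_k_partition(0, 3);  // beta left as configured, skip_elementwise_ = true (partial result)
//   op.set_k_partition(1, 3);  // beta forced to 1, skip_elementwise_ = true
//   op.set_k_partition(2, 3);  // beta forced to 1, activation applied (last partition)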
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| 8,065 | C | 37.778846 | 129 | 0.673404 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_sigmoid.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination with Sigmoid operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/linear_combination_generic.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator followed by the Sigmoid activation, to an array of elements.
///
/// D = sigmoid(alpha * accumulator + beta * source + uniform)
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
using LinearCombinationSigmoid = LinearCombinationGeneric<Sigmoid, ElementOutput_, Count, ElementAccumulator_,
ElementCompute_, Scale, Round, true>;
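// Usage sketch (illustrative, not part of the original CUTLASS sources; the element type and
// vector width below are arbitrary example choices):
//
//   using SigmoidEpilogue = LinearCombinationSigmoid<float, 4>;
//   SigmoidEpilogue::Params params(/*alpha=*/1.0f, /*beta=*/0.0f);
//   SigmoidEpilogue op(params);
//   // frag_D = op(frag_accum);               // D = sigmoid(alpha * accumulator)
//   // frag_D = op(frag_accum, frag_source);  // D = sigmoid(alpha * accumulator + beta * source)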
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| 3,688 | C | 50.957746 | 129 | 0.609273 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_bias_relu.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element, int ElementsPerAccess>
struct ArrayMaximum {
CUTLASS_HOST_DEVICE
Array<Element, ElementsPerAccess> operator()(
Array<Element, ElementsPerAccess> const &lhs,
Array<Element, ElementsPerAccess> const &rhs) const {
Array<Element, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = fmax(lhs[i], rhs[i]);
}
return result;
}
};
template <int ElementsPerAccess>
struct ArrayMaximum<half_t, ElementsPerAccess> {
CUTLASS_DEVICE
Array<half_t, ElementsPerAccess> operator()(
Array<half_t, ElementsPerAccess> const &lhs,
Array<half_t, ElementsPerAccess> const &rhs) const {
Array<half_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(lhs.raw_data());
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(rhs.raw_data());
__half2 *res_ptr = reinterpret_cast<__half2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_ptr[i]);
}
#else
__half const *lhs_ptr = reinterpret_cast<__half const *>(lhs.raw_data());
__half const *rhs_ptr = reinterpret_cast<__half const *>(rhs.raw_data());
__half *res_ptr = reinterpret_cast<__half *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_ptr[i]) ? rhs_ptr[i] : lhs_ptr[i]);
}
#endif
return result;
}
CUTLASS_DEVICE
Array<half_t, ElementsPerAccess> operator()(
Array<half_t, ElementsPerAccess> const &lhs,
half_t const &rhs) const {
Array<half_t, ElementsPerAccess> result;
#if __CUDA_ARCH__ >= 800
int const kVectorCount = ElementsPerAccess / 2;
__half rhs_raw = reinterpret_cast<__half const &>(rhs);
__half2 rhs_pair = __half2half2(rhs_raw);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(lhs.raw_data());
__half2 *res_ptr = reinterpret_cast<__half2 *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = __hmax2(lhs_ptr[i], rhs_pair);
}
#else
__half const *lhs_ptr = reinterpret_cast<__half const *>(lhs.raw_data());
__half const rhs_raw = reinterpret_cast<__half const &>(rhs);
__half *res_ptr = reinterpret_cast<__half *>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
res_ptr[i] = ((lhs_ptr[i] < rhs_raw) ? rhs_raw : lhs_ptr[i]);
}
#endif
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, int ElementsPerAccess>
struct ReluConditional {
CUTLASS_HOST_DEVICE
void operator()(
bool conditional[],
Array<Element, ElementsPerAccess> const &fragment,
Element threshold) const {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
conditional[i] = !(fragment[i] < threshold);
}
}
};
template <int ElementsPerAccess>
struct ReluConditional<half_t, ElementsPerAccess> {
CUTLASS_DEVICE
void operator()(
bool conditional[],
Array<half_t, ElementsPerAccess> const &fragment,
half_t threshold) const {
__half y = reinterpret_cast<__half const &>(threshold);
__half const *x = reinterpret_cast<__half const *>(fragment.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
conditional[i] = !__hlt(x[i], y);
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This is a partial specialization for fused Bias and ReLU. It supports the option of packing
/// ReLU conditionals in a bit vector that may be used by backwards passes as an optimization.
///
/// This class can only be used with cutlass::epilogue::threadblock::EpilogueWithBroadcast<>.
///
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <
typename ElementC_,
typename ElementAccumulator_,
typename ElementCompute_,
typename ElementZ_,
int ElementsPerAccess,
bool StoreT = true
>
class LinearCombinationBiasRelu {
public:
using ElementOutput = ElementC_;
using ElementC = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementZ = ElementZ_;
using ElementT = uint1b_t;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
using ElementwiseOp = ReLu<ElementCompute>;
using BinaryOp = plus<ElementCompute>;
// Indicates that this epilogue applies only one binary operation
static bool const kIsSingleSource = true;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementOutput, kElementsPerAccess>;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
/// If true, the 'Z' tensor is stored
static bool const kStoreZ = true;
/// If true, the 'T' tensor is stored
static bool const kStoreT = StoreT;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
ElementZ threshold; ///< ReLu threshold
    //
    // Methods
    //
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute()),
alpha_ptr(nullptr),
beta_ptr(nullptr),
threshold(ElementCompute()) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta,
ElementCompute threshold_ = ElementCompute()
):
alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
NumericConverter<ElementZ, ElementCompute> convert_threshold;
threshold = convert_threshold(threshold_);
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr), threshold(ElementZ()) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr,
ElementCompute threshold_ = ElementCompute()
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
NumericConverter<ElementZ, ElementCompute> convert_threshold;
threshold = convert_threshold(threshold_);
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr), threshold(ElementZ()) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementZ threshold_;
public:
//
// Methods
//
/// Constructor from Params
CUTLASS_HOST_DEVICE
LinearCombinationBiasRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementZ const &>(allones);
}
}
/// Applies the operation when is_source_needed() is true
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C,
FragmentCompute const &V) const {
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);
FragmentCompute result_Z;
bool conditions[kElementsPerAccess];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = alpha_ * tmp_Accum[i];
z += beta_ * tmp_C[i];
z = binary_op(z, V[i]);
result_Z[i] = z;
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
//
// Compute condition
//
detail::ReluConditional<ElementZ, kElementsPerAccess> relu_conditional;
relu_conditional(conditions, frag_Z, threshold_);
detail::ArrayMaximum<ElementZ, kElementsPerAccess> maximum_op;
frag_Z = maximum_op(frag_Z, threshold_);
if (kStoreT) {
PackPredicates<kElementsPerAccess> pack_predicates;
frag_T = pack_predicates(conditions);
}
}
/// Applies the operation when is_source_needed() is false
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V) const {
BinaryOp binary_op;
FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
FragmentCompute result_Z;
bool conditions[kElementsPerAccess];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerAccess; ++i) {
ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]);
result_Z[i] = z;
}
NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z;
frag_Z = convert_z(result_Z);
//
// Compute condition
//
detail::ReluConditional<ElementZ, kElementsPerAccess> relu_conditional;
relu_conditional(conditions, frag_Z, threshold_);
detail::ArrayMaximum<ElementZ, kElementsPerAccess> maximum_op;
frag_Z = maximum_op(frag_Z, threshold_);
    //
    // Store
    //
if (kStoreT) {
PackPredicates<kElementsPerAccess> pack_predicates;
frag_T = pack_predicates(conditions);
}
}
};
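// Usage sketch (illustrative, not part of the original CUTLASS sources; the element types and
// access width below are arbitrary example choices). The functor is intended as the OutputOp of
// cutlass::epilogue::threadblock::EpilogueWithBroadcast<>:
//
//   using OutputOp = LinearCombinationBiasRelu<half_t, float, float, half_t, 8>;
//   OutputOp::Params params(/*alpha=*/1.0f, /*beta=*/0.0f, /*threshold=*/0.0f);
//   OutputOp op(params);
//   // op(frag_Z, frag_T, frag_accum, frag_V);  // Z = ReLU(alpha * AB + V); T packs the ReLU predicates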
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,490 | C | 28.913525 | 116 | 0.637064 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations on planar-complex arrays
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to arrays of planar-complex elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
/// Note, as with most CUTLASS components for planar complex, the template arguments describe
/// the underlying real data type.
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationPlanarComplex {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
using FragmentOutput = ArrayPlanarComplex<ElementOutput, kCount>;
using FragmentAccumulator = ArrayPlanarComplex<ElementAccumulator, kCount>;
using ComputeFragment = ArrayPlanarComplex<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
complex<ElementCompute> alpha; ///< scales accumulators
complex<ElementCompute> beta; ///< scales source tensor
complex<ElementCompute> const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
complex<ElementCompute> const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
complex<ElementCompute> alpha,
complex<ElementCompute> beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
complex<ElementCompute> const *alpha_ptr,
complex<ElementCompute> const *beta_ptr
): alpha(complex<ElementCompute>()), beta(complex<ElementCompute>()), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
complex<ElementCompute> alpha_;
complex<ElementCompute> beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationPlanarComplex(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_.real() != ElementCompute(0) || beta_.imag() != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source(
source_converter(source.real),
source_converter(source.imag));
ComputeFragment converted_accumulator(
accumulator_converter(accumulator.real),
accumulator_converter(accumulator.imag));
// Perform binary operations
ComputeFragment intermediate;
multiplies<Array<ElementCompute, kCount> > mul_op;
multiply_add<Array<ElementCompute, kCount> > mul_add_op;
// complex multiply: I = beta * C
intermediate.real = mul_op(beta_.real(), converted_source.real);
intermediate.imag = mul_op(beta_.real(), converted_source.imag);
intermediate.real = mul_add_op(-beta_.imag(), converted_source.imag, intermediate.real);
intermediate.imag = mul_add_op( beta_.imag(), converted_source.real, intermediate.imag);
// complex multiply-add: I = alpha * AB + I
intermediate.real = mul_add_op(alpha_.real(), converted_accumulator.real, intermediate.real);
intermediate.imag = mul_add_op(alpha_.real(), converted_accumulator.imag, intermediate.imag);
intermediate.real = mul_add_op(-alpha_.imag(), converted_accumulator.imag, intermediate.real);
intermediate.imag = mul_add_op( alpha_.imag(), converted_accumulator.real, intermediate.imag);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return FragmentOutput(
destination_converter(intermediate.real),
destination_converter(intermediate.imag));
}
  /// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator(
accumulator_converter(accumulator.real),
accumulator_converter(accumulator.imag));
// Perform binary operations
ComputeFragment intermediate;
multiplies<Array<ElementCompute, kCount> > mul_op;
multiply_add<Array<ElementCompute, kCount> > mul_add_op;
    // complex multiply: I = alpha * AB
    intermediate.real = mul_op(alpha_.real(), converted_accumulator.real);
    intermediate.imag = mul_op(alpha_.real(), converted_accumulator.imag);
intermediate.real = mul_add_op(-alpha_.imag(), converted_accumulator.imag, intermediate.real);
intermediate.imag = mul_add_op( alpha_.imag(), converted_accumulator.real, intermediate.imag);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return FragmentOutput(
destination_converter(intermediate.real),
destination_converter(intermediate.imag));
}
};
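// Worked expansion (illustrative, not part of the original CUTLASS sources): the mul / mul_add
// calls above implement ordinary complex multiplication on the planar-complex fragments.
// Writing beta = (br + bi*i), C = (cr + ci*i), alpha = (ar + ai*i), AB = (xr + xi*i):
//
//   I.real = br*cr - bi*ci              // real part of beta * C
//   I.imag = br*ci + bi*cr              // imag part of beta * C
//   D.real = I.real + ar*xr - ai*xi     // plus real part of alpha * AB
//   D.imag = I.imag + ar*xi + ai*xr     // plus imag part of alpha * AB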
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 9,351 | C | 38.294117 | 129 | 0.666667 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/activation.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This extends the contents of cutlass/functional.h with frequently used activation functions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/constants.h"
#include "cutlass/complex.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
#include "cutlass/functional.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct LinearCombinationGenericParams {
T alpha; ///< scales accumulators
T beta; ///< scales source tensor
T const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
T const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
LinearCombinationGenericParams():
alpha(T(1)),
beta(T(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
LinearCombinationGenericParams(
T alpha,
T beta = T(0)
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
LinearCombinationGenericParams(
T const *alpha_ptr,
T const *beta_ptr = nullptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Identity operator
template <typename T>
struct Identity {
static const bool kIsHeavy=false;
CUTLASS_HOST_DEVICE
T operator()(T value) const {
return value;
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
template <typename T, int N>
struct Identity<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
return value;
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
/// ReLu operator - propagates NaNs
/// Always put threshold in the right hand side of max to propagate NaN.
template <typename T>
struct ReLu {
static const bool kIsHeavy=false;
CUTLASS_HOST_DEVICE
T operator()(T const & threshold, T value) const {
maximum<T> mx;
return mx(value, threshold);
}
CUTLASS_HOST_DEVICE
T operator()(T value) const {
maximum<T> mx;
return mx(value, T(0));
}
/// Host-constructable parameters structure
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T value, Params const ¶ms_) const {
return this->operator()(value);
}
};
template <typename T, int N>
struct ReLu<Array<T, N>> {
static const bool kIsHeavy=false;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const & threshold, Array<T, N> const &frag) const {
maximum<Array<T, N> > mx;
return mx(frag, threshold);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &frag) const {
maximum<Array<T, N> > mx;
return mx(frag, T(0));
}
/// Host-constructable parameters structure
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &frag, Params const ¶ms_) const {
return this->operator()(frag);
}
};
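// Illustrative note (not part of the original CUTLASS sources): keeping the threshold on the
// right-hand side of the maximum is what makes ReLu propagate NaNs. cutlass::maximum<T> is
// (roughly) (lhs < rhs) ? rhs : lhs, so with a NaN input the comparison is false and the NaN
// operand is returned instead of the threshold:
//
//   ReLu<float> relu_op;
//   // relu_op(0.5f)  == 0.5f
//   // relu_op(-2.0f) == 0.0f
//   // relu_op(NAN) stays NaN rather than being clamped to 0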
// Leaky Relu operator
template <typename T>
struct LeakyReLU {
struct Params: LinearCombinationGenericParams<T> {
T leaky_alpha; ///< leaky_alpha
// Methods
using LinearCombinationGenericParams<T>::LinearCombinationGenericParams;
CUTLASS_HOST_DEVICE
Params():
LinearCombinationGenericParams<T>(),
leaky_alpha(T(1)) {}
CUTLASS_HOST_DEVICE
Params(
T alpha,
T beta,
T leaky_alpha = T(1)
): LinearCombinationGenericParams<T>(alpha, beta), leaky_alpha(leaky_alpha) {}
};
CUTLASS_HOST_DEVICE
T operator()(T const &value, T const & alpha_recip) const {
T res = value > T(0) ? value : value * alpha_recip;
return res;
}
CUTLASS_HOST_DEVICE
T operator()(T const &value, Params const ¶ms_) const {
    return this->operator()(value, params_.leaky_alpha);
}
};
template <typename T, int N>
struct LeakyReLU<Array<T, N> > {
struct Params: LinearCombinationGenericParams<T> {
T leaky_alpha; ///< leaky_alpha
using LinearCombinationGenericParams<T>::LinearCombinationGenericParams;
// Methods
CUTLASS_HOST_DEVICE
Params():
LinearCombinationGenericParams<T>(),
leaky_alpha(T(1)) {}
CUTLASS_HOST_DEVICE
Params(
T alpha,
T beta,
T leaky_alpha = T(1)
): LinearCombinationGenericParams<T>(alpha, beta), leaky_alpha(leaky_alpha) {}
};
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, T const & alpha_recip) const {
Array<T, N> y;
LeakyReLU<T> leaky_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(value.size()); ++i) {
y[i] = leaky_op(value[i], alpha_recip);
}
return y;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, Params const ¶ms_) const {
return this->operator()(value, params_.leaky_alpha);
}
};
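// Usage sketch (illustrative, not part of the original CUTLASS sources): leaky_alpha scales
// negative inputs while non-negative inputs pass through unchanged.
//
//   LeakyReLU<float>::Params params(/*alpha=*/1.0f, /*beta=*/0.0f, /*leaky_alpha=*/0.1f);
//   LeakyReLU<float> leaky_op;
//   // leaky_op( 2.0f, params) ==  2.0f
//   // leaky_op(-2.0f, params) == -0.2f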
// Tanh operator
template <typename T>
struct Tanh {
CUTLASS_HOST_DEVICE
T operator()(T const &scalar) const {
return fast_tanh(scalar);
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T const &scalar, Params const ¶ms_) const {
return this->operator()(scalar);
}
};
template <typename T, int N>
struct Tanh<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
Tanh<T> tanh_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = tanh_op(value[i]);
}
return y;
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
template <int N>
struct Tanh<Array<half_t, N>> {
using T = half_t;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& z) const {
fast_tanh_op<Array<T, N>> tanh;
return tanh(z);
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
// Sigmoid operator
template <typename T>
struct Sigmoid {
CUTLASS_HOST_DEVICE
T operator()(T const &scalar) const {
return T(1) / (T(1) + fast_exp(-scalar));
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T const &scalar, Params const ¶ms_) const {
return this->operator()(scalar);
}
};
template <typename T, int N>
struct Sigmoid<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
Sigmoid<T> sigmoid_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = sigmoid_op(value[i]);
}
return y;
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
template <int N>
struct Sigmoid<Array<half_t, N>> {
using T = half_t;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& z) const {
plus<Array<T, N>> add;
#if defined(CUTLASS_USE_TANH_FOR_SIGMOID)
multiplies<Array<T, N>> mul;
fast_tanh_op<Array<T, N>> tanh;
return mul(add(tanh(mul(z, cutlass::constants::half<T>())), cutlass::constants::one<T>()),
cutlass::constants::half<T>());
#else
divides<Array<T, N>> div;
negate<Array<T, N>> neg;
fast_exp_op<Array<T, N>> fast_exp;
return div(cutlass::constants::one<T>(),
add(cutlass::constants::one<T>(),
fast_exp(neg(z))));
#endif
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &z, Params const ¶ms_) const {
return this->operator()(z);
}
};
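// Illustrative note (not part of the original CUTLASS sources): the CUTLASS_USE_TANH_FOR_SIGMOID
// branch above relies on the identity
//
//   sigmoid(x) = 1 / (1 + exp(-x)) = 0.5 * (tanh(x / 2) + 1)
//
// which is exactly what mul(add(tanh(mul(z, half)), one), half) evaluates, trading the
// exponential for the fast_tanh_op path.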
// SiLu (swish) operator introduced by Elfwing et al. in the following paper
// "Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning" (2017)
// https://arxiv.org/pdf/1702.03118.pdf
// It is used in EfficientNet and YOLOv5, for example.
// Reference: https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html
template <typename T>
struct SiLu {
CUTLASS_HOST_DEVICE
T operator()(T const &scalar) const {
Sigmoid<T> sigmoid;
return scalar * sigmoid(scalar);
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T const &scalar, Params const ¶ms_) const {
return this->operator()(scalar);
}
};
template <typename T, int N>
struct SiLu<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Sigmoid<Array<T, N>> sigmoid_op;
multiplies<Array<T, N>> mul;
return mul(value, sigmoid_op(value));
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
// Hardswish operator introduced by Howard et al. in the following paper
// "Searching for MobileNetV3" (2019)
// https://arxiv.org/pdf/1905.02244.pdf
// It is used in models based on MobilenetNetV3.
// Reference: https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html
template <typename T>
struct HardSwish {
CUTLASS_HOST_DEVICE
T operator()(T const &x) const {
minimum<T> mn;
maximum<T> mx;
T relu6 = mn(mx(x + T(3), T(0)), T(6));
return x * relu6 / T(6);
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T const &x, Params const ¶ms_) const {
return this->operator()(x);
}
};
template <>
struct HardSwish<float> {
using T = float;
CUTLASS_HOST_DEVICE
T operator()(T const &x) const {
minimum<T> mn;
maximum<T> mx;
T relu6 = mn(mx(x + T(3), T(0)), T(6));
return x * relu6 * 0.16666667f;
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T const &x, Params const ¶ms_) const {
return this->operator()(x);
}
};
template <typename T, int N>
struct HardSwish<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
HardSwish<T> hardswish_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = hardswish_op(value[i]);
}
return y;
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &x, Params const ¶ms_) const {
return this->operator()(x);
}
};
template <int N>
struct HardSwish<Array<half_t, N> > {
using T = half_t;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
minimum<Array<T, N> > mn;
maximum<Array<T, N> > mx;
multiplies<Array<T, N> > mul;
plus<Array<T, N> > add;
return mul(mul(mn(mx(add(value, T(3)), T(0)), T(6)), value), T(0.16666667f));
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &x, Params const ¶ms_) const {
return this->operator()(x);
}
};
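// Worked example (illustrative, not part of the original CUTLASS sources):
// hardswish(x) = x * min(max(x + 3, 0), 6) / 6, so
//
//   hardswish(-4) = -4 * 0 / 6 = 0        // fully suppressed below -3
//   hardswish( 1) =  1 * 4 / 6 ~= 0.6667  // smooth transition region
//   hardswish( 4) =  4 * 6 / 6 = 4        // identity above +3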
//
// GELU function definitions implemented as described by
// Hendrycks, D., and Gimpel, K. in
// "Gaussian Error Linear Units (GELUs)." (2020)
// https://arxiv.org/pdf/1606.08415.pdf
//
// Floating-point constants are Taylor coefficients described in the paper.
//
// GELU operator
template <typename T>
struct GELU {
CUTLASS_HOST_DEVICE
T operator()(T const &scalar) const {
return T(cutlass::constants::half<T>() * scalar *
(cutlass::constants::one<T>() + (T)erff((float)(scalar / cutlass::constants::root_two<T>()))));
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T const &scalar, Params const ¶ms_) const {
return this->operator()(scalar);
}
};
template <>
struct GELU<float> {
CUTLASS_HOST_DEVICE
float operator()(float const &scalar) const {
return cutlass::constants::half<float>() * scalar *
(cutlass::constants::one<float>() + erff( scalar / cutlass::constants::root_two<float>() ));
}
using Params = LinearCombinationGenericParams<float>;
CUTLASS_HOST_DEVICE
float operator()(float const &scalar, Params const ¶ms_) const {
return this->operator()(scalar);
}
};
template <>
struct GELU<double> {
CUTLASS_HOST_DEVICE
double operator()(double const &scalar) const {
return cutlass::constants::half<double>() * scalar *
(cutlass::constants::one<double>() + erf( scalar / cutlass::constants::root_two<double>() ));
}
using Params = LinearCombinationGenericParams<double>;
CUTLASS_HOST_DEVICE
double operator()(double const &scalar, Params const ¶ms_) const {
return this->operator()(scalar);
}
};
template <typename T, int N>
struct GELU<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
GELU<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(value[i]);
}
return y;
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
// GELU operator implemented using the Taylor series approximation
template <typename T>
struct GELU_taylor {
static const bool kIsHeavy=true;
CUTLASS_HOST_DEVICE
T operator()(T const &z) const {
T k0 = T(0.7978845608028654);
T k1 = T(0.044715);
return T(cutlass::constants::half<T>() * z *
(cutlass::constants::one<T>() + fast_tanh(k0 * z * (cutlass::constants::one<T>() + k1 * z * z))));
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
T operator()(T const &scalar, Params const ¶ms_) const {
return this->operator()(scalar);
}
};
template <int N>
struct GELU_taylor<Array<half_t, N> > {
static const bool kIsHeavy=true;
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const &z) const {
using T = half_t;
Array<half_t, N> y;
half_t k0 = half_t(0.7978845608028654);
half_t k1 = half_t(0.044715);
multiply_add<Array<half_t, N>> fma;
multiplies<Array<half_t, N>> mul;
plus<Array<half_t, N>> add;
fast_tanh_op<Array<half_t, N>> tanh;
Array<half_t, N> u = mul(mul(k0, z), fma(mul(k1, z), z, cutlass::constants::one<T>()));
y = mul(mul(z, cutlass::constants::half<T>()), add(cutlass::constants::one<T>(), tanh(u)));
return y;
}
using Params = LinearCombinationGenericParams<half_t>;
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
template <typename T, int N>
struct GELU_taylor<Array<T, N> > {
static const bool kIsHeavy=true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
GELU_taylor<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(value[i]);
}
return y;
}
using Params = LinearCombinationGenericParams<T>;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value, Params const ¶ms_) const {
return this->operator()(value);
}
};
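// Illustrative note (not part of the original CUTLASS sources): the constants above are the
// usual tanh-approximation coefficients, k0 = sqrt(2/pi) ~= 0.7978845608 and k1 = 0.044715,
// so GELU_taylor computes
//
//   gelu(z) ~= 0.5 * z * (1 + tanh( sqrt(2/pi) * (z + 0.044715 * z^3) ))
//
// matching the approximation given in the GELU paper referenced above.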
/// Computes backwards pass for GELU operator assuming d_t is the layer gradient and
/// z is computed from the forward pass.
template <typename T>
struct dGELU {
CUTLASS_HOST_DEVICE
T operator()(T const &d_t, T const &z) const {
T k0 = T(0.7978845608028654);
T k1 = T(0.044715);
T k2 = T(0.1070322243);
T tanh_out = fast_tanh(k0 * z * (1 + k1 * z * z));
T ff = constants::half<T>() * z * ((1 - tanh_out * tanh_out) * (k0 + k2 * z * z)) +
constants::half<T>() * (1 + tanh_out);
return ff * d_t;
}
};
template <typename T, int N>
struct dGELU<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &d_t, Array<T, N> const &z) const {
Array<T, N> y;
dGELU<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(d_t[i], z[i]);
}
return y;
}
};
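// Derivation sketch (illustrative, not part of the original CUTLASS sources): with
// u = k0 * z * (1 + k1 * z^2) and gelu(z) ~= 0.5 * z * (1 + tanh(u)), the chain rule gives
//
//   d/dz gelu(z) ~= 0.5 * (1 + tanh(u)) + 0.5 * z * (1 - tanh(u)^2) * (k0 + 3*k0*k1 * z^2)
//
// which is the expression computed above, with k2 = 0.1070322243 ~= 3 * k0 * k1.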
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 18,909 | C | 25.784703 | 109 | 0.629965 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_clamp.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear scaling operations used by epilogues. Values are clamped before
converting to the output element type.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Single source of truth for whether to unroll for `LinearCombinationClamp()`
constexpr bool LinearCombinationClampIsHeavy() {
return false;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements then clamps the output before
/// converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationClamp {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationClampIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
/// Clamping constant value
ElementCompute const kClampMax =
ElementCompute(platform::numeric_limits<ElementOutput>::max());
ElementCompute const kClampMin =
ElementCompute(platform::numeric_limits<ElementOutput>::lowest());
intermediate = max_accumulator(intermediate, kClampMin);
intermediate = min_accumulator(intermediate, kClampMax);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
/// Clamping constant value
ElementCompute const kClampMax =
ElementCompute(platform::numeric_limits<ElementOutput>::max());
ElementCompute const kClampMin =
ElementCompute(platform::numeric_limits<ElementOutput>::lowest());
intermediate = max_accumulator(intermediate, kClampMin);
intermediate = min_accumulator(intermediate, kClampMax);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
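// Usage sketch (illustrative, not part of the original CUTLASS sources; the types and vector
// width below are arbitrary example choices). The clamp range comes from the numeric limits of
// ElementOutput, e.g. [-128, 127] for int8_t:
//
//   using ClampEpilogue = LinearCombinationClamp<int8_t, 8, int32_t, float>;
//   ClampEpilogue::Params params(/*alpha=*/1.0f, /*beta=*/0.0f);
//   ClampEpilogue op(params);
//   // frag_D = op(frag_accum);  // D = clamp(alpha * accumulator, -128, 127), converted to int8_t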
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Applies a linear combination operator to an array of elements then clamps the output before
/// converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
ScaleType::Kind Scale, ///< Control Alpha and Beta scaling
FloatRoundStyle Round
>
class LinearCombinationClamp<ElementOutput_, Count, int, float, Scale, Round> {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static_assert(
platform::numeric_limits<ElementOutput>::is_integer,
"This elementwise op expects the output to be int.");
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationClampIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha
): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
// Float min-max
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter;
return destination_converter(scaled_accumulator);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_accumulator;
// Float min-max
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter;
return destination_converter(scaled_accumulator);
}
};
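// Usage sketch (illustrative comment, not part of the library). It shows a direct
// invocation of the clamping functor on register fragments; the template arguments
// assume the int-accumulator / float-compute specialization defined above and the
// values are made up for the example.
//
//   using OutputOp = LinearCombinationClamp<int8_t, 8, int, float>;
//   OutputOp op(OutputOp::Params(0.5f, 1.0f));        // alpha = 0.5, beta = 1
//
//   OutputOp::FragmentAccumulator accum;              // Array<int, 8>
//   OutputOp::FragmentOutput source;                  // Array<int8_t, 8>
//   accum.fill(300);
//   source.fill(int8_t(2));
//
//   // 0.5f * 300 + 1.0f * 2 = 152, which saturates to 127 on conversion to int8_t
//   OutputOp::FragmentOutput d = op(accum, source);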
#endif // Conditional guards to enable partial specialization for packed integers
////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements then clamps
/// the output before converting to the output element type.
///
/// D = alpha * accumulator + beta * source + uniform
///
/// Note: The method below is valid only when problem_size_K <= 256 for signed int8 gemm
/// or problem_size_K <= 128 for unsigned int8 gemm. The default approach above handles
/// the general case.
/// TODO: Add logic to fall back to the default approach
template <
  /// Data type used to load and store tensors
typename ElementOutput_,
/// Number of elements computed per operation
int Count,
  ///< Controls Alpha and Beta scaling
ScaleType::Kind Scale = ScaleType::Default,
/// Rounding mode
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest>
class FastLinearCombinationClamp {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static_assert(
platform::numeric_limits<ElementOutput>::is_integer,
"This elementwise op expects the output to be int.");
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = false;
/// Host-constructable parameters structure
struct Params {
/// scales accumulators
ElementCompute alpha;
/// scales source tensor
ElementCompute beta;
/// pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *alpha_ptr;
/// pointer to source scalar - if not null, loads it from memory
ElementCompute const *beta_ptr;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params()
: alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha, ElementCompute beta)
: alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute alpha)
: alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr)
: alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {}
CUTLASS_HOST_DEVICE
Params(ElementCompute const *alpha_ptr)
: alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
public:
/// Constructs the function object, possibly loading from pointers in host
/// memory
CUTLASS_HOST_DEVICE
FastLinearCombinationClamp(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator,
FragmentOutput const &source,
ElementCompute uniform = ElementCompute(0)) const {
    // Convert source to internal compute numeric type
FastNumericArrayConverter<ElementCompute, ElementOutput, kCount, Round>
source_converter;
FastNumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
// Float min-max
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate =
mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator,
intermediate); // D = alpha * Accum + X
}
/// Clamping constant value
ElementCompute const kClamp =
ElementCompute(1 << (sizeof_bits<ElementOutput>::value - 1));
intermediate = max_accumulator(intermediate, -kClamp);
intermediate = min_accumulator(intermediate, kClamp - ElementCompute(1));
// Convert to destination numeric type
FastNumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
FastNumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Compute linear scaling in floating point
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_accumulator;
minimum<ComputeFragment> min_accumulator;
maximum<ComputeFragment> max_accumulator;
// Float min-max
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator);
}
/// Clamping constant value
ElementCompute const kClamp =
ElementCompute(1 << (sizeof_bits<ElementOutput>::value - 1));
intermediate = max_accumulator(intermediate, -kClamp);
intermediate = min_accumulator(intermediate, kClamp - ElementCompute(1));
// Convert to destination numeric type
FastNumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
};
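// Usage sketch (illustrative comment, not part of the library). FastLinearCombinationClamp
// fixes the accumulator type to int and the compute type to float, so only the output type
// and the vector width are supplied; the values below are made up for the example.
//
//   using OutputOp = FastLinearCombinationClamp<int8_t, 4>;
//   OutputOp op(OutputOp::Params(1.0f, 0.0f));        // alpha = 1, beta = 0
//
//   OutputOp::FragmentAccumulator accum;              // Array<int, 4>
//   accum.fill(200);
//
//   // beta == 0, so no source fragment is required; each element is clamped to
//   // [-128, 127] before conversion, yielding 127 here.
//   OutputOp::FragmentOutput d = op(accum);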
////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| 23,649 | C | 33.07781 | 150 | 0.672629 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_drelu.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination with a maximum operation used by epilogues.
*/
#pragma once
#include <cutlass/half.h>
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementCompute_, ///< Data type returned by this functor
typename ElementAccumulator_, ///< Data type of accumulators
typename ElementSource_, ///< Data type of source tensor
typename ElementTensor_, ///< Data type of additional tensor
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
                                               ///< but 64 or 32 are sometimes used when there is not enough data to store
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationDRelu {
public:
using ElementOutput = ElementSource_;
using ElementCompute = ElementCompute_;
using ElementAccumulator = ElementAccumulator_;
using ElementSource = ElementSource_;
using ElementTensor = ElementTensor_;
static int const kCount = Count;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentSource = Array<ElementSource, kCount>;
using FragmentTensor = Array<ElementTensor, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
    ElementCompute threshold;          ///< threshold for the dReLU conditional; lanes at or below it are zeroed
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta,
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementTensor threshold_;
bool participates_in_reduction_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationDRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = ElementTensor(params.threshold);
participates_in_reduction_ = true;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Returns true if the threadblock computes the reduction
CUTLASS_HOST_DEVICE
bool participates_in_reduction() const {
return participates_in_reduction_;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementTensor const &>(allones);
participates_in_reduction_ = false;
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentCompute operator()(
FragmentAccumulator const &accumulator,
FragmentSource const &source,
FragmentTensor const &tensor) const {
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
intermediate = mul_add_source(beta_, converted_source); // X = beta * C
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
// dReLU = (cond ? dy : 0)
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
ElementTensor cond = tensor[i];
if (cond <= threshold_) {
intermediate[i] = ElementCompute();
}
}
return intermediate;
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentCompute operator()(
FragmentAccumulator const &accumulator,
FragmentTensor const &tensor) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
// dReLU = (cond ? dy : 0)
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
ElementTensor cond = tensor[i];
if (cond <= threshold_) {
intermediate[i] = ElementCompute();
}
}
return intermediate;
}
};
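// Usage sketch (illustrative comment, not part of the library). The functor scales the
// accumulator and source fragments, then zeroes every lane whose companion tensor value
// is at or below the threshold, which propagates the ReLU gradient; the element types
// and values below are chosen only for the example.
//
//   using Drelu = LinearCombinationDRelu<float, float, cutlass::half_t, cutlass::half_t, 4>;
//   Drelu op(Drelu::Params(1.0f, 0.0f));              // alpha = 1, beta = 0, threshold = 0
//
//   Drelu::FragmentAccumulator dy;                    // upstream gradients (Array<float, 4>)
//   Drelu::FragmentSource      src;                   // unused here because beta == 0
//   Drelu::FragmentTensor      z;                     // forward-pass activations
//   dy.fill(2.0f);
//   src.clear();
//   z.fill(cutlass::half_t(-1.0f));                   // every lane is <= threshold
//
//   Drelu::FragmentCompute dx = op(dy, src, z);       // every element comes out as 0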
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementCompute_, ///< Data type returned by this functor
typename ElementAccumulator_, ///< Data type of accumulators
typename ElementSource_, ///< Data type of source tensor
int Count, ///< Number of elements computed per operation
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationDReluConditionalBits {
public:
using ElementOutput = ElementSource_;
using ElementCompute = ElementCompute_;
using ElementAccumulator = ElementAccumulator_;
using ElementSource = ElementSource_;
using ElementTensor = uint1b_t;
static bool const kIsHeavy = false;
static int const kCount = Count;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentSource = Array<ElementSource, kCount>;
using FragmentTensor = Array<ElementTensor, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta
): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr
): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
FragmentTensor predicate_mask_;
bool participates_in_reduction_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationDReluConditionalBits(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
participates_in_reduction_ = true;
predicate_mask_.clear();
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return beta_ != ElementCompute(0);
}
/// Returns true if the threadblock computes the reduction
CUTLASS_HOST_DEVICE
bool participates_in_reduction() const {
return participates_in_reduction_;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
predicate_mask_.clear();
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// Avoid computing the reduction if this isn't the final Split-K slice
participates_in_reduction_ = false;
bit_not<FragmentTensor> not_op;
predicate_mask_ = not_op(predicate_mask_);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_DEVICE
FragmentCompute operator()(
FragmentAccumulator const &accumulator,
FragmentSource const &source,
FragmentTensor const &tensor) const {
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
bit_or<FragmentTensor> or_op;
FragmentTensor predicates = or_op(tensor, predicate_mask_);
// Obtain from packed bits
bool conditions[kCount];
UnpackPredicates<kCount> unpack_predicates;
unpack_predicates(conditions, predicates);
// dReLU = (cond ? dy : 0)
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
if (!conditions[i]) {
intermediate[i] = ElementCompute();
}
}
return intermediate;
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentCompute operator()(
FragmentAccumulator const &accumulator,
FragmentTensor const &tensor) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
bit_or<FragmentTensor> or_op;
FragmentTensor predicates = or_op(tensor, predicate_mask_);
// Obtain from packed bits
bool conditions[kCount];
UnpackPredicates<kCount> unpack_predicates;
unpack_predicates(conditions, predicates);
// dReLU = (cond ? dy : 0)
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
if (!conditions[i]) {
intermediate[i] = ElementCompute();
}
}
return intermediate;
}
};
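// Usage note (illustrative comment, not part of the library). This variant consumes the
// ReLU conditions as packed bits (one uint1b_t per output element) rather than comparing
// values against a threshold, so the auxiliary tensor costs a single bit per element.
// A hypothetical instantiation for a float-accumulated kernel with half-precision source:
//
//   using DreluBits = LinearCombinationDReluConditionalBits<
//       float,             // ElementCompute
//       float,             // ElementAccumulator
//       cutlass::half_t,   // ElementSource
//       8>;                // elements (and predicate bits) consumed per call
//
//   // DreluBits::FragmentTensor is Array<uint1b_t, 8>: one predicate bit per lane.
//   // Lanes whose bit is 0 have their gradient zeroed by operator().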
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,195 | C | 32.545254 | 129 | 0.650214 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/reduction_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing reduction operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a reduction sum to an array of elements.
///
///
template <
typename Element_, ///< Data type used to load and store tensors
int Count ///< Number of elements computed per operation
>
class ReductionOpPlus {
public:
using Element = Element_;
static int const kCount = Count;
using Fragment = Array<Element, kCount>;
using Operator = plus<Fragment>;
/// Host-constructable parameters structure
struct Params { };
private:
/// reduction operator
Operator operator_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
ReductionOpPlus(Params const ¶ms) {
}
  /// Computes the elementwise sum of the two fragments
CUTLASS_HOST_DEVICE
Fragment operator()(
Fragment const &lhs,
Fragment const &rhs) const {
return operator_(lhs, rhs);
}
};
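// Usage sketch (illustrative comment, not part of the library): the functor is an
// elementwise sum over two fragments, used when partial results from split-K slices
// are combined; the element type and width below are arbitrary.
//
//   using Reduce = ReductionOpPlus<float, 4>;
//   Reduce reduce((Reduce::Params()));
//
//   Reduce::Fragment a, b;
//   a.fill(1.0f);
//   b.fill(2.0f);
//   Reduce::Fragment c = reduce(a, b);   // every element equals 3.0f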
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| 3,416 | C | 33.867347 | 100 | 0.624707 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/conversion_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing conversion operations used by epilogues.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts the result without other operations
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class Convert {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementAccumulator_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = FragmentAccumulator;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = false;
/// Host-constructable parameters structure
struct Params {
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() {}
};
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
Convert(Params const ¶ms = Params()) {
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
}
/// Returns true if source is needed based on state of runtime arguments
CUTLASS_HOST_DEVICE
constexpr bool is_source_needed() const {
return false;
}
/// Constexpr function to enable the compiler to optimize away the source loading if it is
/// never needed.
CUTLASS_HOST_DEVICE
constexpr bool is_source_ever_needed() const {
return false;
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source = FragmentOutput(),
ElementCompute uniform = ElementCompute(0)) const {
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementAccumulator, kCount, Round> destination_converter;
return destination_converter(accumulator);
}
};
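// Usage sketch (illustrative comment, not part of the library): this epilogue functor
// performs a pure type conversion of the accumulator fragment, with no alpha/beta
// scaling involved; the types below are chosen only for the example.
//
//   using Converter = Convert<cutlass::half_t, 8, float>;
//   Converter convert;                                 // Params is empty and defaulted
//
//   Converter::FragmentAccumulator accum;              // Array<float, 8>
//   accum.fill(1.5f);
//   Converter::FragmentOutput out = convert(accum);    // Array<half_t, 8>, all 1.5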
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
| 4,691 | C | 34.278195 | 101 | 0.658708 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/output_iterator_parameter.h | #pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref.h"
namespace cutlass {
namespace epilogue {
namespace threadblock {
template<
typename TensorLayout_, ///! The original output tensor layout
typename OutputIteratorLayout_, ///! Layout used by epilogue output iterator
typename TensorRef_, ///! Input tensor to epilogue output iterator
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter {
using TensorLayout = TensorLayout_;
using OutputIteratorLayout = OutputIteratorLayout_;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = ConvOperator;
using ConvProblemSize = ConvProblemSize_;
/// Wgrad stride idx for implicit gemm algorithm
// Conv2d row-major matrix (KxRSC)
// Conv3d row-major matrix (KxTRSC)
static int const kWgradStrideIdx =
platform::is_same<TensorLayout, layout::TensorNHWC>::value ? 2 : 3;
/// This chooses the appropriate stride element of the C tensor.
static int const kTensorStrideIdx =
(kConvolutionalOperator == conv::Operator::kWgrad ? kWgradStrideIdx : 0);
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride(kTensorStrideIdx);
}
CUTLASS_HOST_DEVICE
static OutputTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
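// Usage sketch (illustrative comment, not part of the library). For Fprop and Dgrad the
// epilogue views the output tensor as a row-major matrix whose leading dimension is
// stride(0) of the NHWC tensor, while Wgrad uses the stride that spans a whole filter
// (index 2 for Conv2d NHWC, 3 for Conv3d NDHWC). The aliases below are hypothetical:
//
//   using Param = ConvOutputIteratorParameter<
//       cutlass::layout::TensorNHWC,                            // layout of C/D tensors
//       cutlass::layout::RowMajor,                              // layout seen by the epilogue iterator
//       cutlass::TensorRef<float, cutlass::layout::TensorNHWC>,
//       cutlass::conv::Operator::kFprop,
//       cutlass::conv::Conv2dProblemSize>;
//
//   // Param::extent(problem_size) is the (M, N) of the implicit GEMM, i.e. (N*P*Q, K)
//   // for Fprop, and Param::layout(ref) builds the iterator layout from the selected
//   // stride of the tensor reference.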
template <
int InterleavedK,
typename TensorRef_,
conv::Operator ConvOperator,
typename ConvProblemSize_
>
struct ConvOutputIteratorParameter<
layout::TensorNCxHWx<InterleavedK>,
layout::TensorNCxHWx<InterleavedK>,
TensorRef_,
ConvOperator,
ConvProblemSize_>
{
using TensorLayout = typename layout::TensorNCxHWx<InterleavedK>;
using OutputIteratorLayout = typename layout::TensorNCxHWx<InterleavedK>;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = ConvOperator;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static OutputTensorCoord extent(ConvProblemSize problem_size) {
return problem_size.output_extent();
}
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
| 2,912 | C | 30.32258 | 103 | 0.729052 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/interleaved_epilogue.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
/// Is for a symmetric kernel
BlasMode BlasMode_ = BlasMode::kGemm
>
struct DefaultEpilogueTensorOpBlas3 {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
static BlasMode const kBlasMode = BlasMode_;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorBlas3<
OutputTileThreadMap,
ElementOutput,
kBlasMode
>;
using AccumulatorFragmentIterator = typename std::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
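// Usage sketch (illustrative comment, not part of the library). The struct is a
// metafunction: given the threadblock tile, an already-defined warp-level tensor-op MMA,
// and an output functor, it composes the iterators and exposes the result as ::Epilogue.
// WarpMmaTensorOp below is a placeholder for a type normally produced by the default
// GEMM configuration.
//
//   using Epilogue = typename DefaultEpilogueTensorOpBlas3<
//       cutlass::gemm::GemmShape<128, 128, 32>,                   // threadblock tile
//       WarpMmaTensorOp,                                          // placeholder warp-level MMA
//       2,                                                        // partitions along K
//       cutlass::epilogue::thread::LinearCombination<float, 4>,   // output functor
//       4,                                                        // elements per access
//       cutlass::BlasMode::kSymmetric                             // symmetric (SYRK-style) kernel
//   >::Epilogue;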
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 7,129 | C | 39.511363 | 100 | 0.670501 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/interleaved_epilogue.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base_streamk.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator without splitk
template <
/// Shape of threadblock tile (concept: GemmShape)
typename Shape_,
/// Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
typename WarpMmaOperator_,
/// Number of partitions of the K dimension
int PartitionsK,
/// Tile iterator reading and writing output tensors
typename OutputTileIterator_,
/// Fragment iterator selecting accumulators
typename AccumulatorFragmentIterator_,
/// Output operator
typename OutputOp_,
/// Number of interleaved k
int InterleavedK>
class InterleavedEpilogue :
public EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>
{
public:
using BaseStreamK = EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using OutputTileIterator = OutputTileIterator_;
using OutputOp = OutputOp_;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
/// Accumulator element
using ElementAccumulator = typename AccumulatorTile::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef =
typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<typename OutputTileIterator::Element,
OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType =
Array<ElementAccumulator, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount =
gemm::GemmShape<Shape::kM / WarpMmaOperator::Shape::kM,
Shape::kN / WarpMmaOperator::Shape::kN, kPartitionsK>;
public:
static_assert(OutputTileIterator::kElementsPerAccess,
"This must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements %
OutputTileIterator::kElementsPerAccess),
"Divisibility");
/// Shared storage allocation needed by the epilogue
struct SharedStorage {};
public:
/// Constructor
CUTLASS_DEVICE
InterleavedEpilogue(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx) ///< Id of thread within warp
:
BaseStreamK(thread_idx)
{}
/// Aggregates the accumulator sets shared by peer blocks in the global workspace,
/// performing epilogue computations, writing to output
CUTLASS_DEVICE
void reduce(
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *element_workspace,
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
      OutputTileIterator source_iterator)       ///< Tile iterator for source
{
    // Reduce peer accumulator fragments into one fragment
AccumulatorFragment accum_fragment;
BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace);
// Source-fragment data (zero-initialized for scenarios where the
// output operator allows us to skip loading it from global input)
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
if (output_op.is_source_needed())
{
source_iterator += reduce_fragment_idx;
source_iterator.load(source_fragment);
}
// Compute the output result
typename OutputTileIterator::Fragment output_fragment;
// Apply the output operator
apply_output_operator(output_fragment, output_op, accum_fragment, source_fragment);
// Store the final result
destination_iterator += reduce_fragment_idx;
destination_iterator.store(output_fragment);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
      OutputTileIterator source_iterator) {     ///< Tile iterator for source
if (!output_op.is_source_needed()) {
compute_source_not_needed_(output_op, destination_iterator, accumulators);
}
else {
compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator);
}
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators ///< Complete warp-level accumulator tile
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Convert fragment
//
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment;
apply_output_operator_source_not_needed(output_fragment, output_op, accum_fragment);
//
// Store the final result
//
destination_iterator.set_iteration_index(iter);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
      OutputTileIterator source_iterator         ///< Tile iterator for source
) {
//
// Predicated tile iterators constructed from members
//
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator.set_iteration_index(iter);
source_iterator.load(source_fragment);
++source_iterator;
//
// Convert fragment
//
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment;
apply_output_operator(output_fragment, output_op, accum_fragment, source_fragment);
//
// Store the final result
//
destination_iterator.set_iteration_index(iter);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
protected:
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(
&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
int const kOutputOpIterations = OutputTileIterator::Fragment::kElements /
OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op,
typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(
&aligned_accum_fragment);
int const kOutputOpIterations = OutputTileIterator::Fragment::kElements /
OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i]);
}
}
};
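// Invocation sketch (illustrative comment, not part of the library), as it would appear
// at the end of a threadblock-scoped mainloop once the accumulator tile is complete.
// 'Epilogue' is assumed to alias a full InterleavedEpilogue instantiation and the other
// objects are assumed to be constructed by the surrounding kernel.
//
//   __shared__ typename Epilogue::SharedStorage storage;   // empty struct for this epilogue
//   Epilogue epilogue(storage, thread_idx, warp_idx, lane_idx);
//   epilogue(output_op, destination_iterator, accumulators, source_iterator);
//
// When output_op.is_source_needed() returns false, the source iterator is never read,
// which is the path taken when beta == 0.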
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 13,546 | C | 33.383249 | 126 | 0.675624 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_simt.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using SIMT.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_simt.h"
#include "cutlass/epilogue/warp/tile_iterator_simt.h"
#include "cutlass/epilogue/threadblock/default_thread_map_simt.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator_pitch_liner.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/epilogue_depthwise.h"
#include "cutlass/layout/permute.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for SimtOps.
template <
typename Shape_,
typename WarpMmaSimt_,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueSimt {
using Shape = Shape_;
using WarpMmaSimt = WarpMmaSimt_;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaSimt::LayoutC;
using ElementAccumulator = typename WarpMmaSimt::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt<
Shape,
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::Policy,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt<
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::ThreadMma,
layout::RowMajor,
typename WarpMmaSimt::Policy
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt<
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::ThreadMma,
ElementAccumulator,
layout::RowMajor,
typename WarpMmaSimt::Policy
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaSimt,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
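// Usage sketch (illustrative comment, not part of the library), mirroring how kernel-level
// defaults consume this metafunction. WarpMmaSimt stands in for a warp-level SIMT MMA type
// that is normally produced elsewhere in the GEMM configuration.
//
//   using Epilogue = typename DefaultEpilogueSimt<
//       cutlass::gemm::GemmShape<128, 128, 8>,                    // threadblock tile
//       WarpMmaSimt,                                              // placeholder warp-level MMA
//       cutlass::epilogue::thread::LinearCombination<float, 1>,   // output functor
//       1                                                         // elements per access
//   >::Epilogue;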
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for SimtOps.
template <
typename Shape_,
typename WarpMmaSimt_,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueSimtStridedDgrad {
using Shape = Shape_;
using WarpMmaSimt = WarpMmaSimt_;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaSimt::LayoutC;
using ElementAccumulator = typename WarpMmaSimt::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt<
Shape,
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::Policy,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt<
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::ThreadMma,
layout::RowMajor,
typename WarpMmaSimt::Policy
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt<
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::ThreadMma,
ElementAccumulator,
layout::RowMajor,
typename WarpMmaSimt::Policy
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaSimt,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for SimtOps.
template <
int Rank,
typename Shape_,
typename WarpMmaSimt_,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueSimtAffineRankN {
using Shape = Shape_;
using WarpMmaSimt = WarpMmaSimt_;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaSimt::LayoutC;
using ElementAccumulator = typename WarpMmaSimt::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt<
Shape,
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::Policy,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN<
OutputTileThreadMap,
ElementOutput,
Rank
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt<
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::ThreadMma,
layout::RowMajor,
typename WarpMmaSimt::Policy
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt<
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::ThreadMma,
ElementAccumulator,
layout::RowMajor,
typename WarpMmaSimt::Policy
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaSimt,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for SimtOps.
template <typename Shape_, // ThreadBlock Shape
typename WarpMmaSimt_, // mma_depthwise_simt
typename OutputOp_,
int ElementsPerAccess_,
typename ThreadOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>,
typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> >
struct DefaultDirectConvEpilogueSimt {
using Shape = Shape_;
using WarpMmaSimt = WarpMmaSimt_;
using WarpShape = typename WarpMmaSimt::Shape;
using OutputOp = OutputOp_;
using ThreadOutputShape = ThreadOutputShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
static int const kElementsPerAccess = ElementsPerAccess_;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaSimt::LayoutC;
using ElementAccumulator = typename WarpMmaSimt::ElementC;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN
>;
static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value;
static int const kThreads = WarpCount::kCount * kWarpSize;
//
// Thread map
//
using OutputTileThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<ThreadBlockOutputShape::kC, ThreadBlockOutputShape::kNHW>,
kThreads,
kElementsPerAccess
>;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorDirectConv<
OutputTileThreadMap,
ElementOutput,
ThreadOutputShape,
ThreadBlockOutputShape
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt<
typename WarpMmaSimt::Shape,
typename WarpMmaSimt::ThreadMma,
layout::RowMajor,
typename WarpMmaSimt::Policy
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimtDirect2dConv<
typename WarpMmaSimt::Shape,
ThreadOutputShape,
ThreadBlockOutputShape,
typename WarpMmaSimt::ThreadMma,
ElementAccumulator,
layout::RowMajor,
typename WarpMmaSimt::Policy
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorPitchLiner<
OutputTileThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::EpilogueDepthwise<
Shape,
ThreadOutputShape,
ThreadBlockOutputShape,
WarpMmaSimt,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
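// Worked example of the thread map above (hypothetical shapes, for illustration only):
// with ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, 64>, the
// pitch-linear tile is PitchLinearShape<64, 64> (kC = 64 contiguous, kNHW = 1 * 8 * 8
// strided), strip-mined across kThreads = WarpCount::kCount * 32 threads.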
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,319 | C | 30.714286 | 100 | 0.696524 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_workspace.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs.
This does not attempt to target any particular output layout. Instead, each threadblock
streams out its accumulator elements using 128b store operations. This assumes all threadblocks
have unique output tiles.
The target data layout is:
- threadblock indices mapped to linear offsets as (m, n, k), where m is fastest-changing
- threadblock output space partitioned into warps; each warp's region is contiguous
- per-thread accumulators partitioned into 128b accesses
- output memory striped across the threads of a warp
This enables very fast streaming of data, completely limited by the memory system. No predication
or data exchange is performed, and each threadblock is assumed to have a full region of memory
to write to.
This epilogue establishes an upper bound for epilogue performance and is suitable for
reductions across the GEMM K dimension which require a separate workspace.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< shape of accumulator tile (concept: MatrixShape)
int WarpCount, ///< number of warps
typename FragmentC_ ///< warp-level GEMM operator (concept: gemm::warp::Mma)
>
class EpilogueWorkspace {
public:
using Shape = Shape_;
using FragmentC = FragmentC_;
using ElementC = typename FragmentC::value_type;
static int const kWarpCount = WarpCount;
/// Optimize for 128b accesses
static int const kAccessSizeInBits = 128;
/// Warp size from the perspective of memory operations
static int const kWarpSize = 32;
/// Vector length of accesses
static int const kElementsPerAccess =
kAccessSizeInBits / sizeof_bits<ElementC>::value;
/// Number of stores per thread
static int const kIterations = FragmentC::kElements / kElementsPerAccess;
static_assert(
!(FragmentC::kElements % kElementsPerAccess),
"The number of accumulators must be divisible by the access size.");
/// Total number of vectorized accesses in warp (in units of vector)
static int const kWarpAccesses = kIterations * kWarpSize;
/// Total number of vectorized accesses in threadblock tile (in units of vector)
static int const kThreadblockAccesses = kWarpAccesses * kWarpCount;
/// Parameters structure
struct Params {
/// Pointer to C matrix
ElementC *ptr_C;
/// Stride between tiles along the GEMM N dimension (in units of vectors)
int stride_n;
/// Stride between tiles along the GEMM K dimension (in units of vectors)
int stride_k;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(
ElementC *ptr_C, ///< Pointer to C matrix
int stride_n_, ///< Stride between tiles along the GEMM N dimension (in units of ElementC)
int stride_k_ ///< Stride between tiles along the GEMM K dimension (in units of ElementC)
):
ptr_C(ptr_C), stride_n(stride_n_ / kElementsPerAccess), stride_k(stride_k_ / kElementsPerAccess) {
}
};
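// Example of the element-to-vector conversion above (hypothetical element type, for
// illustration only): with ElementC = half_t, kElementsPerAccess = 128 / 16 = 8, so a
// stride_n_ of 4096 elements is stored as 4096 / 8 = 512 vectors.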
/// Shared storage allocation needed by the epilogue
struct SharedStorage {
// Intentionally empty
};
private:
struct alignas((kAccessSizeInBits / 8)) AccessType {
Array<ElementC, kElementsPerAccess> storage;
};
/// Constant reference to parameters object
AccessType *pointer_;
/// Stride between tiles along the n dimension (in vectors)
int stride_n_;
/// Stride between tiles along the k dimension (in vectors)
int stride_k_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWorkspace(
Params const ¶ms, ///< Host-constructable params object
SharedStorage &, ///< Shared storage object
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
pointer_(reinterpret_cast<AccessType *>(params.ptr_C)),
stride_n_(params.stride_n),
stride_k_(params.stride_k) {
// Add per-thread offset
pointer_ += lane_idx + warp_idx * kWarpAccesses;
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
cutlass::gemm::GemmCoord problem_size, ///< Problem size of GEMM (units of ElementC)
cutlass::gemm::GemmCoord tb_tile_coord, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
FragmentC const &accum) { ///< Accumulator tile
// Compute offset for entire threadblock (note, per-thread offset has been folded in already)
AccessType *pointer = pointer_ +
tb_tile_coord.m() * kThreadblockAccesses +
tb_tile_coord.n() * stride_n_ +
tb_tile_coord.k() * stride_k_;
// Cast to vectorized view of accumulator fragments
AccessType const * src_pointer = reinterpret_cast<AccessType const *>(&accum);
// Write out accumulators at full speed
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations; ++i) {
pointer[i * kWarpSize] = src_pointer[i];
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,308 | C | 35.914141 | 121 | 0.655993 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Element_>
class DirectStoreEpilogueIterator {
public:
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = 1;
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
using Base = PredicatedTileIteratorParams;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout) {
stride = layout.stride(0) * sizeof(Element);
}
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
public:
//
// Data members
//
Element *pointer; // pointer to the output matrix
LongIndex stride; // stride in elements between rows
TensorCoord extent; // extent of output matrix
int thread_idx; // thread index
TensorCoord threadblock_offset;
public:
/// Constructor
CUTLASS_DEVICE
DirectStoreEpilogueIterator(
PredicatedTileIteratorParams const & params,
Element *pointer_,
TensorCoord extent_,
int thread_idx_,
TensorCoord threadblock_offset_ = TensorCoord(),
int const * indices = nullptr
):
pointer(pointer_),
stride(params.stride / sizeof(Element)),
extent(extent_),
thread_idx(thread_idx_),
threadblock_offset(threadblock_offset_)
{
}
};
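// Units note (illustrative example): Params stores the stride in bytes
// (layout.stride(0) * sizeof(Element)), and the iterator converts it back to elements;
// e.g. a row-major float matrix with leading dimension 1024 yields Params::stride =
// 4096 bytes and DirectStoreEpilogueIterator::stride = 1024 elements.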
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 4,678 | C | 31.720279 | 100 | 0.645361 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/conv2d_problem_size.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
struct OutputTileShapeDesc {
int column;
int row;
int group;
int cluster;
int tile;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
OutputTileShapeDesc(): column(0), row(0), group(0), cluster(0), tile(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
OutputTileShapeDesc(
int column_,
int row_,
int group_,
int cluster_,
int tile_
):
column(column_),
row(row_),
group(group_),
cluster(cluster_),
tile(tile_) { }
/// Total number of points in the 5D space
CUTLASS_HOST_DEVICE
int count() const {
return column * row * group * cluster * tile;
}
#if 0
CUTLASS_HOST_DEVICE
void print() const {
printf("{%d, %d, %d, %d, %d}", column, row, group, cluster, tile);
}
#endif
};
/// Helper template to construct an OutputTileShapeDesc from an OutputTileShape template.
template <typename Shape>
CUTLASS_HOST_DEVICE
OutputTileShapeDesc make_OutputTileShapeDesc() {
return OutputTileShapeDesc(
Shape::kColumn,
Shape::kRow,
Shape::kGroup,
Shape::kCluster,
Shape::kTile
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread map description
struct OutputTileThreadMapDesc {
int threads;
int elements_per_access;
OutputTileShapeDesc shape;
OutputTileShapeDesc iterations;
OutputTileShapeDesc delta;
OutputTileShapeDesc count;
//
// Methods
//
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc() { }
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc(
int threads_,
int elements_per_access_,
OutputTileShapeDesc shape_,
OutputTileShapeDesc iterations_,
OutputTileShapeDesc delta_,
OutputTileShapeDesc count_
):
threads(threads_),
elements_per_access(elements_per_access_),
shape(shape_),
iterations(iterations_),
delta(delta_),
count(count_)
{
}
};
/// Helper template to construct an OutputTileThreadMapDesc from an OutputTileThreadMap template.
template <typename ThreadMap>
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc make_OutputTileThreadMapDesc() {
return OutputTileThreadMapDesc(
ThreadMap::kThreads,
ThreadMap::kElementsPerAccess,
make_OutputTileShapeDesc<typename ThreadMap::Shape>(),
make_OutputTileShapeDesc<typename ThreadMap::Iterations>(),
make_OutputTileShapeDesc<typename ThreadMap::Delta>(),
make_OutputTileShapeDesc<typename ThreadMap::Count>()
);
}
///////////////////////////////////////////////////////////////////////////////
//
// Parameters struct for PredicatedTileIterator
//
struct PredicatedTileIteratorParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
LongIndex stride; ///< stride in bytes between rows
LongIndex increment_row; ///< increment quantity (in bytes) to advance when moving between rows
LongIndex increment_group; ///< increment quantity (in bytes) to advance when moving to the next group
LongIndex increment_cluster; ///< increment quantity (in bytes) to advance when moving to the next cluster
LongIndex advance_row; ///< amount to add to move to the next 'row' position
LongIndex advance_group; ///< amount to add to move to the next 'group' position
LongIndex advance_cluster; ///< amount to add to move to the next 'cluster' position
LongIndex advance_tile; ///< amount to add to move to the next 'tile'
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_, OutputTileThreadMapDesc thread_map) {
stride = stride_;
increment_row = stride * thread_map.delta.row;
increment_group = stride * thread_map.delta.group
- stride * thread_map.delta.row * (thread_map.iterations.row - 1);
increment_cluster = stride * thread_map.delta.cluster
- stride * thread_map.delta.group * (thread_map.iterations.group - 1)
- stride * thread_map.delta.row * (thread_map.iterations.row - 1);
advance_row = stride * thread_map.shape.row;
advance_group =
stride *
(thread_map.shape.group - 1) * thread_map.shape.row * thread_map.count.row;
advance_cluster =
stride *
thread_map.count.group *
thread_map.shape.group *
thread_map.count.row *
thread_map.shape.row;
advance_tile =
stride *
thread_map.shape.group *
thread_map.shape.row *
thread_map.shape.cluster *
thread_map.shape.tile;
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Status initialize(Index stride_, OutputTileThreadMapDesc thread_map) {
return initialize(LongIndex(stride_), thread_map);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams() {
initialize(LongIndex(0), OutputTileThreadMapDesc());
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams(Index stride, OutputTileThreadMapDesc thread_map) {
initialize(stride, thread_map);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorParams(LongIndex stride, OutputTileThreadMapDesc thread_map) {
initialize(stride, thread_map);
}
};
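// Worked example of initialize() above (hypothetical thread map values, for
// illustration only): with stride = 1024 bytes, delta.row = 8, delta.group = 64 and
// iterations.row = 4, increment_row = 8 * 1024 = 8192 bytes while increment_group =
// 64 * 1024 - 8 * 1024 * (4 - 1) = 40960 bytes, i.e. the group increment first rewinds
// the rows already visited before stepping to the next group.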
///////////////////////////////////////////////////////////////////////////////
//
// Parameters struct for PredicatedTileIteratorDirect2dConv
//
struct PredicatedTileIteratorDirect2dConvParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
FastDivmod pq_divmod;
FastDivmod q_divmod;
LongIndex stride;
LongIndex stride_n;
LongIndex stride_p;
int N;
int P;
int Q;
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
stride = stride_; // The stride per row of output tensor (bytes)
stride_n = problem_size.P * problem_size.Q;
stride_p = problem_size.Q;
N = problem_size.N;
P = problem_size.P;
Q = problem_size.Q;
// FastDivmod helpers over the threadblock tile counts along P and Q
if (threadblock_output_shape.row() != 0 && threadblock_output_shape.column() != 0) {
int tiles_p =
(problem_size.P + (threadblock_output_shape.row() - 1)) / (threadblock_output_shape.row());
int tiles_q = (problem_size.Q + (threadblock_output_shape.column() - 1)) /
(threadblock_output_shape.column());
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
}
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Status initialize(
Index stride_,
cutlass::conv::Conv2dProblemSize const &problem_size = cutlass::conv::Conv2dProblemSize(),
MatrixCoord threadblock_output_shape = MatrixCoord()) {
return initialize(LongIndex(stride_), problem_size, threadblock_output_shape);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams() { initialize(LongIndex(0)); }
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams(Index stride,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
initialize(stride, problem_size, threadblock_output_shape);
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirect2dConvParams(LongIndex stride,
cutlass::conv::Conv2dProblemSize const &problem_size,
MatrixCoord threadblock_output_shape) {
initialize(stride, problem_size, threadblock_output_shape);
}
};
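// Worked example of the divmod setup above (hypothetical problem size, for
// illustration only): with problem_size.P = problem_size.Q = 28 and a threadblock
// output shape of 8 x 8, tiles_p = (28 + 7) / 8 = 4 and tiles_q = 4, so pq_divmod
// divides by 16 and q_divmod divides by 4.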
///////////////////////////////////////////////////////////////////////////////
// InterleavedPredicatedTileIterator
///////////////////////////////////////////////////////////////////////////////
/// Predicated tile access iterator descriptor object containing template dependent state
struct InterleavedPredicatedTileIteratorDesc {
int element_size_bits;
int elements_per_access;
int threadmap_warp_size;
layout::PitchLinearCoord threadmap_iterations;
layout::PitchLinearCoord threadmap_delta;
//
// Methods
//
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc() { }
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc(
int element_size_bits_,
int elements_per_access_,
int threadmap_warp_size_,
layout::PitchLinearCoord threadmap_iterations_,
layout::PitchLinearCoord threadmap_delta_
):
element_size_bits(element_size_bits_),
elements_per_access(elements_per_access_),
threadmap_warp_size(threadmap_warp_size_),
threadmap_iterations(threadmap_iterations_),
threadmap_delta(threadmap_delta_) { }
};
//
// Parameters struct InterleavedPredicatedTileIterator
//
struct InterleavedPredicatedTileIteratorParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
LongIndex stride; ///< stride in bytes between rows
LongIndex advance_row; ///< amount to add to move to the next 'row' position
LongIndex advance_column; ///< amount to add to move to the next 'column' position
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride_, InterleavedPredicatedTileIteratorDesc desc) {
stride = stride_;
advance_row = desc.threadmap_delta.contiguous() * desc.element_size_bits / 8;
advance_column = stride_ - desc.threadmap_iterations.contiguous() *
desc.elements_per_access *
desc.element_size_bits *
desc.threadmap_warp_size / 8;
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams() {
initialize(LongIndex(0), InterleavedPredicatedTileIteratorDesc());
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams(Index stride, InterleavedPredicatedTileIteratorDesc desc) {
initialize(stride, desc);
}
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorParams(LongIndex stride, InterleavedPredicatedTileIteratorDesc desc) {
initialize(stride, desc);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper template to construct an InterleavedPredicatedTileIteratorDesc from an OutputTileThreadMap template.
template <typename Element, typename ThreadMap>
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc make_InterleavedPredicatedTileIteratorDesc() {
return InterleavedPredicatedTileIteratorDesc(
sizeof_bits<Element>::value,
ThreadMap::kElementsPerAccess,
ThreadMap::kWarpSize,
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper template to construct the appropriate tile iterator descriptor from
/// template-dependent state (Element, Layout, and ThreadMap)
template <typename Element, typename Layout,
typename ThreadMap>
struct MakePredicatedTileIteratorDesc;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileIteratorDesc for layout::RowMajor output data.
template <typename Element, typename ThreadMap>
struct MakePredicatedTileIteratorDesc <
Element, layout::RowMajor, ThreadMap> {
CUTLASS_HOST_DEVICE
OutputTileThreadMapDesc operator()() {
return make_OutputTileThreadMapDesc<ThreadMap>();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileIteratorDesc for layout::ColumnMajorInterleaved<InterleavedN> output data.
template <typename Element, typename ThreadMap, int InterleavedN>
struct MakePredicatedTileIteratorDesc <
Element, layout::ColumnMajorInterleaved<InterleavedN>, ThreadMap> {
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIteratorDesc operator()() {
return make_InterleavedPredicatedTileIteratorDesc<Element, ThreadMap>();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 14,496 | C | 29.455882 | 112 | 0.638176 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_wmma_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "predicated_tile_iterator.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/pitch_linear.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for Wmma TensorOp accumulator layouts
template <
typename ThreadblockShape_,
typename WarpShape_,
typename InstructionShape_,
int PartitionsK,
typename Element_,
int ElementsPerAccess
>
struct DefaultThreadMapWmmaTensorOp {
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
static int const kPartitionsK = PartitionsK;
using Element = Element_;
static int const kElementsPerAccess = ElementsPerAccess;
//
// Definitions
//
struct Detail {
/// Wmma Tensor Operations fundamentally perform operations on InstructionShape::kM rows
static int const kTensorOpRows = InstructionShape::kM;
static int const kWarpSize = 32;
static_assert(
!(ThreadblockShape::kM % WarpShape::kM) &&
!(ThreadblockShape::kN % WarpShape::kN), "Divisibility");
/// Number of warps
using WarpCount = gemm::GemmShape<
ThreadblockShape::kM / WarpShape::kM,
ThreadblockShape::kN / WarpShape::kN,
kPartitionsK
>;
/// Number of participating threads
static int const kThreads = WarpCount::kCount * kWarpSize;
};
//
// ThreadMap
//
/// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap
using Type = OutputTileOptimalThreadMap <
OutputTileShape<ThreadblockShape::kN, Detail::kTensorOpRows, Detail::WarpCount::kM, 1, 1>,
OutputTileShape<1, WarpShape::kM / Detail::kTensorOpRows, 1, 1, WarpShape::kM / Detail::kTensorOpRows>,
Detail::kThreads,
kElementsPerAccess,
sizeof_bits<Element>::value
>;
};
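// Worked example (hypothetical tile sizes, for illustration only): with
// ThreadblockShape = GemmShape<128, 128, 32>, WarpShape = GemmShape<64, 64, 32>,
// InstructionShape = GemmShape<16, 16, 16> and PartitionsK = 1, this helper yields
// kTensorOpRows = 16, WarpCount = GemmShape<2, 2, 1>, and kThreads = 4 * 32 = 128.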
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 4,098 | C | 34.95614 | 107 | 0.647877 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator_pitch_liner.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This assumes the shared memory tile is in a permuted layout which avoids bank conflicts on loading.
When the fragment is loaded into registers, it matches the row-major thread map assumed by
the predicated tile iterator writing to global memory.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load output tile from shared memory in epilogue.
///
/// Satisfies: ReadableTileIterator
///
template <typename ThreadMap_, ///< Thread map (concept: PitchLinearThreadMap)
typename Element_, ///< Element data type
int MaxAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8>
class SharedLoadIteratorPitchLiner {
public:
using ThreadMap = ThreadMap_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kMinAlignment =
ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8;
static int const kAlignment = (MaxAlignment < kMinAlignment ? MaxAlignment : kMinAlignment);
static int const kThreads = ThreadMap::kThreads;
/// Fragment object
using Fragment = Array<Element, ThreadMap::Iterations::kCount * kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, kElementsPerAccess, kAlignment>;
/// Vector type used for SMEM loads
using LoadType =
AlignedArray<Element,
const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess),
const_min(16, kAlignment)>;
static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements;
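// Example of the access split above (hypothetical configuration, for illustration
// only): for Element = float with kElementsPerAccess = 8, AccessType spans 32 bytes
// while LoadType is capped at 128 bits (4 floats), so kLoadsPerAccess = 8 / 4 = 2.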
private:
//
// Data members
//
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Stride along adjacent rows
int stride_;
/// Base address offset
Index base_smem_address_;
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
SharedLoadIteratorPitchLiner(TensorRef ref, int thread_idx)
: byte_pointer_(reinterpret_cast<uint8_t *>(ref.data())),
stride_((ref.stride(0) * sizeof_bits<Element>::value) / 8),
base_smem_address_(0) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
// Initialize pointer
// thread_offset.row() is contiguous dim
// thread_offset.column() is stride dim
byte_pointer_ += thread_offset.row() * sizeof(AccessType) / kElementsPerAccess +
thread_offset.column() * stride_;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &offset) {
byte_pointer_ +=
offset.row() * ThreadMap::StorageShape::kContiguous * sizeof(AccessType) / kElementsPerAccess +
offset.column() * ThreadMap::StorageShape::kStrided * stride_;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
uint8_t const *byte_pointer =
byte_pointer_ + s * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous * ThreadMap::kElementsPerAccess *
sizeof_bits<Element>::value / 8 +
pointer_offset * sizeof_bits<Element>::value / 8 + base_smem_address_;
int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c;
LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag);
LoadType const *memory_pointer = reinterpret_cast<LoadType const *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kLoadsPerAccess; ++v) {
frag_ptr[frag_base_idx * kLoadsPerAccess + v] = memory_pointer[v];
}
}
}
}
/// Sets the shared memory base address
CUTLASS_DEVICE
void set_smem_base_address(Index address) { base_smem_address_ = address; }
/// Loads a fragment
CUTLASS_DEVICE
void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,394 | C | 36.923077 | 103 | 0.653773 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_direct_store.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs and convolution using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_ ///< Output operator
>
class EpilogueDirectStore {
public:
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
using WarpShape = typename WarpMmaOperator_::Shape;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using OutputOp = OutputOp_;
using Padding = MatrixShape<0, 0>;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Use this to control the granularity of one epilogue 'iteration'
static int const kFragmentsPerIteration = 1;
static int constexpr kSmemTiles = 1;
static int constexpr kSmemPointerOffset = 0;
/// Shared storage allocation needed by the epilogue
struct SharedStorage { } ;
private:
// Assume the accumulator tile consists of multiple interleaved 32x32 tiles.
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename platform::conditional<
platform::is_same<ElementAccumulator, float>::value,
MatrixShape<2, 2>,
MatrixShape<1, 4> >::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = MatrixShape<4, 4>;
static_assert(OutputOp::kCount >= 2,
"The direct store epilogue for Tensor Ops requires the output functor have kCount >= 2.");
private:
LongIndex warp_offset;
int thread_idx;
int warp_idx;
int lane_idx;
int warp_m, warp_n; // warp coordinates within a cta
int tid_m, tid_n; // thread coordinates within a warp
public:
/// Constructor
CUTLASS_DEVICE
EpilogueDirectStore(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx_, ///< ID of a thread within the threadblock
int warp_idx_, ///< ID of warp within threadblock
int lane_idx_ ///< Id of thread within warp
):
thread_idx(thread_idx_),
warp_idx(warp_idx_),
lane_idx(lane_idx_)
{
// warp offsetting calculations
warp_offset = warp_idx * WarpShape::kM * WarpShape::kN;
int warp_id_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
warp_m = warp_id_mn % WarpCount::kM;
warp_n = warp_id_mn / WarpCount::kM;
MatrixCoord warp_offset_coord(warp_m*WarpShape::kM, warp_n*WarpShape::kN);
// thread offsetting calculations
int quad = (lane_idx >> 2);
int lane_in_quad = (lane_idx & 3);
// this seems to be the correct layout
tid_m = quad;
tid_n = 2 * lane_in_quad;
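// e.g. lane_idx 13 -> quad 3, lane_in_quad 1, so (tid_m, tid_n) = (3, 2)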
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator) { ///< Tile iterator for source operand
if (!output_op.is_source_needed()) {
compute_source_not_needed_(output_op, destination_iterator, accumulators);
}
else {
compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator);
}
}
private:
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator) { ///< Tile iterator for source operand
const int kAccumBlockN = 2;
const int kThreadsM = 8;
const int kThreadsN = 4;
const int kBlockM = WarpShape::kM / kThreadsM;
/// Array type used to output
using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>;
/// Array type passed to the output operator - unused elements are optimized away
using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>;
/// Array type used by output functor
using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>;
/// Array type used by output functor
using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>;
AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators);
CUTLASS_PRAGMA_UNROLL
for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) {
int accum_m = kThreadsM * accum_m_idx;
int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m;
int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n;
ElementOutput *output_ptr = destination_iterator.pointer + mL * destination_iterator.stride;
ElementOutput *source_ptr = source_iterator.pointer + mL * source_iterator.stride;
int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN;
CUTLASS_PRAGMA_UNROLL
for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) {
int accum_idx = accum_m_idx + kBlockM * accum_n_idx;
int accum_n = kThreadsM * accum_n_idx;
// mL and nL are logical coordinates in the 2D mapping of the epilogue's 4D output
int nL = nL_base + accum_n;
bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column());
AccumulatorFragmentType accum_fragment;
reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx];
OutputFragmentType output_fragment;
if(guard) {
reinterpret_cast<OutputAccessType &>(output_fragment) =
*reinterpret_cast<OutputAccessType const *>(source_ptr + nL);
}
// Perform output operator
output_fragment = output_op(accum_fragment, output_fragment);
if(guard) {
// Store
*reinterpret_cast<OutputAccessType *>(output_ptr + nL) = reinterpret_cast<OutputAccessType const &>(output_fragment);
}
}
}
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators) { ///< Complete warp-level accumulator tile
const int kAccumBlockN = 2;
const int kThreadsM = 8;
const int kThreadsN = 4;
const int kBlockM = WarpShape::kM / kThreadsM;
/// Array type used to output
using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>;
/// Array type passed to the output operator - unused elements are optimized away
using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>;
/// Array type used by output functor
using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>;
/// Array type used by output functor
using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>;
AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators);
CUTLASS_PRAGMA_UNROLL
for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) {
int accum_m = kThreadsM * accum_m_idx;
int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m;
int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n;
ElementOutput *output_ptr = destination_iterator.pointer + mL * destination_iterator.stride;
int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN;
CUTLASS_PRAGMA_UNROLL
for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) {
int accum_idx = accum_m_idx + kBlockM * accum_n_idx;
int accum_n = kThreadsM * accum_n_idx;
// mL and nL are logical coordinates in the 2D mapping of the epilogue's 4D output
int nL = nL_base + accum_n;
bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column());
AccumulatorFragmentType accum_fragment;
reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx];
OutputFragmentType output_fragment;
// Perform output operator
output_fragment = output_op(accum_fragment);
if(guard) {
// Store
*reinterpret_cast<OutputAccessType *>(output_ptr + nL) =
reinterpret_cast<OutputAccessType const &>(output_fragment);
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,933 | C | 39.04023 | 127 | 0.663174 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/output_tile_thread_map.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Metaprogram for determining the mapping of output elements to threads for epilogue tiles.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tuple defining point in output tile
template <
int Column,
int Row,
int Group,
int Cluster,
int Tile
>
struct OutputTileShape {
static int const kColumn = Column;
static int const kRow = Row;
static int const kGroup = Group;
static int const kCluster = Cluster;
static int const kTile = Tile;
static int const kCount = kColumn * kRow * kGroup * kCluster * kTile;
};
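// Illustrative example (hypothetical parameter values): the instantiation
//
//   using Tile = OutputTileShape<128, 8, 2, 1, 1>;
//
// names a tile spanning 128 columns, 8 rows, 2 groups, 1 cluster and 1 tile,
// giving Tile::kCount == 128 * 8 * 2 * 1 * 1 == 2048.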
////////////////////////////////////////////////////////////////////////////////
template <typename Iterations, typename Delta>
struct OutputTileThreadMapHelpers {
/// Determines the iteration index of a vector access according to the thread map
CUTLASS_HOST_DEVICE
static void iteration_index(
int &column_idx,
int &row_idx,
int &group_idx,
int &cluster_idx,
int &tile_idx,
int iter_idx) {
column_idx = iter_idx % Iterations::kColumn;
int residual = iter_idx / Iterations::kColumn;
row_idx = residual % Iterations::kRow;
residual = residual / Iterations::kRow;
group_idx = residual % Iterations::kGroup;
residual = residual / Iterations::kGroup;
cluster_idx = residual % Iterations::kCluster;
tile_idx = residual / Iterations::kCluster;
}
/// Computes the offset of a given vector access
CUTLASS_HOST_DEVICE
static MatrixCoord iteration_offset(int iter_idx) {
int column_idx;
int row_idx;
int group_idx;
int cluster_idx;
int tile_idx;
iteration_index(column_idx, row_idx, group_idx, cluster_idx, tile_idx, iter_idx);
return
MatrixCoord(
row_idx * Delta::kRow +
group_idx * Delta::kGroup +
cluster_idx * Delta::kCluster +
tile_idx * Delta::kTile,
column_idx * Delta::kColumn);
}
};
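// Worked example (hypothetical Iterations/Delta): with
// Iterations = OutputTileShape<2, 4, 1, 1, 1> and Delta = OutputTileShape<64, 8, 1, 1, 1>,
// iteration_index(5) yields column_idx = 5 % 2 = 1, row_idx = (5 / 2) % 4 = 2 and
// group_idx = cluster_idx = tile_idx = 0, so
// iteration_offset(5) == MatrixCoord(2 * 8, 1 * 64) == MatrixCoord(16, 64).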
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ThreadMap_,
typename Shape_,
typename Iterations_,
typename Delta_,
typename Count_
>
struct OutputTileThreadMap : public OutputTileThreadMapHelpers<Iterations_, Delta_> {
/// Conventional thread map (concept: ThreadMap)
using ThreadMap = ThreadMap_;
/// Number of threads participating in the operation
static int const kThreads = ThreadMap::kThreads;
/// Number of scalar elements per access
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Shape of the tile
using Shape = Shape_;
/// Iterations performed by each thread
using Iterations = Iterations_;
/// Delta between accesses
using Delta = Delta_;
/// Number of iterator iterations
using Count = Count_;
/// Initial offset function
CUTLASS_HOST_DEVICE
static MatrixCoord initial_offset(int thread_idx) {
using Index = typename layout::PitchLinearCoord::Index;
layout::PitchLinearCoord coord = ThreadMap::initial_offset(thread_idx);
Index cluster = coord.strided() / (Shape::kGroup * Shape::kRow);
Index cluster_residual = coord.strided() % (Shape::kGroup * Shape::kRow);
Index group = cluster_residual / (Shape::kRow);
Index row = cluster_residual % (Shape::kRow);
return MatrixCoord{
row + group * Shape::kRow * Count::kRow
+ cluster * Shape::kGroup * Count::kGroup * Shape::kRow * Count::kRow,
coord.contiguous()
};
}
};
////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// RowArrangement determines how one or more warps cover a region of consecutive rows.
template <
typename Shape,
int WarpsRemaining,
int ElementsPerAccess,
int ElementSize,
bool Is2dTile
>
struct RowArrangement;
/// RowArrangement in which each warp's access is a 1D tiled arrangement.
template <
typename Shape,
int WarpsRemaining,
int ElementsPerAccess,
int ElementSize
>
struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, false> {
static int const kWarpSize = 32;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
static int const kIterationsRow = 1;
static int const kDeltaRow = 1;
static int const kIterationsColumn = Shape::kColumn / kElementsPerAccess / kWarpSize;
static int const kDeltaColumn = kWarpSize * kElementsPerAccess;
static int const kAccessWidth = kWarpSize;
static int const kAccessRows = 1;
static int const kWarpPartitionsRow = 1;
static int const kWarpPartitionsColumn = WarpsRemaining;
};
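// Illustrative numbers (assumed Shape::kColumn = 256, ElementsPerAccess = 4): one warp
// covers the row in kIterationsColumn = 256 / 4 / 32 = 2 passes, stepping
// kDeltaColumn = 32 * 4 = 128 columns between accesses, with all 32 lanes on a single row.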
/// RowArrangement in which each warp's access is a 2D tiled arrangement.
template <
typename Shape,
int WarpsRemaining,
int ElementsPerAccess,
int ElementSize
>
struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, true> {
static int const kMemoryAccessSize = 256; // Preferred access size
static int const kWarpSize = 32;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
struct Detail {
static int const kShapeRow = Shape::kRow / WarpsRemaining;
static int const kShapeWidth = Shape::kColumn / kElementsPerAccess;
static int const kTargetMemoryAccessWidth =
kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8);
static int const kTargetAccessRows = kWarpSize / kTargetMemoryAccessWidth;
};
static int const kAccessWidth =
(Detail::kTargetAccessRows > Detail::kShapeRow ?
kWarpSize / Detail::kShapeRow
: const_min(
Detail::kShapeWidth,
const_min(kWarpSize, kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8))
));
static int const kAccessRows =
(Detail::kTargetAccessRows > Detail::kShapeRow ?
Detail::kShapeRow
: const_min(Shape::kRow, kWarpSize / kAccessWidth));
static int const kIterationsRow = Detail::kShapeRow / kAccessRows;
static int const kDeltaRow = kAccessRows;
static int const kIterationsColumn = Detail::kShapeWidth / kAccessWidth;
static int const kDeltaColumn = kAccessWidth * kElementsPerAccess;
static_assert( kAccessWidth * kElementsPerAccess <= Shape::kColumn, "Accessing too many elements per access");
static_assert( kIterationsColumn > 0, "Iteration Count Column must be > 0" );
static_assert( kIterationsRow > 0, "Iteration Count Row must be > 0" );
static int const kWarpPartitionsRow = 1;
static int const kWarpPartitionsColumn = 1;
};
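// Illustrative numbers (assumed Shape = <8 rows, 128 columns>, WarpsRemaining = 1,
// ElementsPerAccess = 4, ElementSize = 32): kShapeRow = 8, kShapeWidth = 32,
// kTargetMemoryAccessWidth = 256 / (4 * 32 / 8) = 16 and kTargetAccessRows = 32 / 16 = 2,
// so the warp is arranged as kAccessRows x kAccessWidth = 2 x 16 accesses and iterates
// kIterationsRow = 8 / 2 = 4 times over rows and kIterationsColumn = 32 / 16 = 2 times
// over columns (kDeltaRow = 2, kDeltaColumn = 16 * 4 = 64).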
}
////////////////////////////////////////////////////////////////////////////////
/// Template metaprogram for partitioning a 4D space across warps to achieve several performance
/// objectives:
///
/// - coalesced memory accesses in units of 128 Byte lines
/// - minimal address arithmetic
/// - minimal predicate calculations
///
template <
typename Shape_,
typename Count_,
int Threads,
int ElementsPerAccess,
int ElementSize
>
struct OutputTileOptimalThreadMap {
using Shape = Shape_;
using Count = Count_;
static int const kWarpSize = 32;
static int const kThreads = Threads;
static int const kWarpCount = kThreads / kWarpSize;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
//
// Metaprogram computation
//
struct Detail {
// Clusters
static int const kIterationsCluster =
((Shape::kCluster > kWarpCount) ?
Shape::kCluster / kWarpCount
: 1);
static int const kDeltaCluster =
((Shape::kCluster > kWarpCount) ?
Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup * Shape::kCluster / kIterationsCluster
: 1);
static int const kCompactedDeltaCluster =
((Shape::kCluster > kWarpCount) ?
Shape::kRow * Shape::kGroup * Shape::kCluster / kIterationsCluster
: 1);
static int const kWarpPartitionsCluster =
((Shape::kCluster > kWarpCount) ?
kWarpCount
: kWarpCount / Shape::kCluster);
static int const kWarpsRemainingForGroups =
((Shape::kCluster > kWarpCount) ? 1 : kWarpCount / Shape::kCluster);
// Groups
static int const kIterationsGroup =
((Shape::kGroup > kWarpsRemainingForGroups) ?
Shape::kGroup / kWarpsRemainingForGroups
: 1);
static int const kDeltaGroup =
((Shape::kGroup > kWarpsRemainingForGroups) ?
Shape::kRow * Count::kRow * Shape::kGroup / kIterationsGroup
: 1);
static int const kCompactedDeltaGroup =
((Shape::kGroup > kWarpsRemainingForGroups) ?
Shape::kRow * Shape::kGroup / kIterationsGroup
: 1);
static int const kWarpPartitionsGroup =
((Shape::kGroup > kWarpsRemainingForGroups) ?
1
: kWarpsRemainingForGroups / Shape::kGroup);
static int const kWarpsRemainingForRows =
((Shape::kGroup > kWarpsRemainingForGroups) ?
1
: kWarpsRemainingForGroups / Shape::kGroup);
// Rows
using RowArrangement = detail::RowArrangement<
Shape,
kWarpsRemainingForRows,
kElementsPerAccess,
kElementSize,
(Shape::kRow > kWarpsRemainingForRows)
>;
// Warp partitions
using WarpPartitions = OutputTileShape<
RowArrangement::kWarpPartitionsColumn,
RowArrangement::kWarpPartitionsRow,
kWarpPartitionsGroup,
kWarpPartitionsCluster,
1>;
static int const kAccessWidth = RowArrangement::kAccessWidth;
static int const kAccessRows = RowArrangement::kAccessRows;
};
//
// Output
//
using Iterations = OutputTileShape<
Detail::RowArrangement::kIterationsColumn,
Detail::RowArrangement::kIterationsRow,
Detail::kIterationsGroup,
Detail::kIterationsCluster,
1>;
using Delta = OutputTileShape<
Detail::RowArrangement::kDeltaColumn,
Detail::RowArrangement::kDeltaRow,
Detail::kDeltaGroup,
Detail::kDeltaCluster,
1>;
/// Initial offset function
CUTLASS_DEVICE
static MatrixCoord initial_offset(int thread_idx) {
int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0);
int lane_idx = thread_idx % kWarpSize;
// Compute warp location
int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster;
int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster;
int group_idx = residual_cluster / Detail::WarpPartitions::kGroup;
int residual_group = residual_cluster % Detail::WarpPartitions::kGroup;
int row_idx = residual_group / Detail::WarpPartitions::kRow;
int col_idx = residual_group % Detail::WarpPartitions::kRow;
// Compute per-lane offset
int lane_row_offset = lane_idx / Detail::kAccessWidth;
int lane_col_offset = lane_idx % Detail::kAccessWidth;
// Compute coordinate in output space
int cluster_offset = cluster_idx * Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup;
int group_offset = group_idx * Shape::kRow * Count::kRow;
int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows;
int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess;
return MatrixCoord(
cluster_offset + group_offset + row_offset + lane_row_offset,
column_offset + lane_col_offset * kElementsPerAccess
);
}
/// Computes the offset of a given vector access
CUTLASS_HOST_DEVICE
static MatrixCoord iteration_offset(int iter_idx) {
return OutputTileThreadMapHelpers<Iterations, Delta>::iteration_offset(iter_idx);
}
/// Compacted thread map in which the 4D region is contiguous
struct CompactedThreadMap {
using Shape = Shape_;
using TileShape = MatrixShape<
Shape::kTile * Shape::kCluster * Shape::kGroup * Shape::kRow,
Shape::kColumn
>;
using Iterations = OutputTileShape<
Detail::RowArrangement::kIterationsColumn,
Detail::RowArrangement::kIterationsRow,
Detail::kIterationsGroup,
Detail::kIterationsCluster,
1>;
using Delta = OutputTileShape<
Detail::RowArrangement::kDeltaColumn,
Detail::RowArrangement::kDeltaRow,
Detail::kCompactedDeltaGroup,
Detail::kCompactedDeltaCluster,
1>;
/// Number of elements within each vector access
static int const kElementsPerAccess = ElementsPerAccess;
/// Number of threads
static int const kThreads = Threads;
/// Function to compute each thread's initial offset
CUTLASS_DEVICE
static MatrixCoord initial_offset(int thread_idx) {
int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0);
int lane_idx = thread_idx % kWarpSize;
// Compute warp location
int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster;
int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster;
int group_idx = residual_cluster / Detail::WarpPartitions::kGroup;
int residual_group = residual_cluster % Detail::WarpPartitions::kGroup;
int row_idx = residual_group / Detail::WarpPartitions::kRow;
int col_idx = residual_group % Detail::WarpPartitions::kRow;
// Compute per-lane offset
int lane_row_offset = lane_idx / Detail::kAccessWidth;
int lane_col_offset = lane_idx % Detail::kAccessWidth;
// Compute coordinate in output space
int cluster_offset = cluster_idx * Shape::kRow * Shape::kGroup;
int group_offset = group_idx * Shape::kRow;
int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows;
int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess;
MatrixCoord coord(
cluster_offset + group_offset + row_offset + lane_row_offset,
column_offset + lane_col_offset * kElementsPerAccess
);
return coord;
}
};
};
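// Summary (descriptive): initial_offset() assigns warps first across clusters, then groups,
// then rows, following Detail::WarpPartitions; within a warp, lanes form a
// Detail::kAccessRows x Detail::kAccessWidth grid in which each lane owns a vector of
// kElementsPerAccess contiguous elements per access.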
////////////////////////////////////////////////////////////////////////////////
/// Template metaprogram for partitioning a 3D interleaved layout across warps
/// to achieve several performance objectives:
///
/// - coalesced memory accesses in units of 64 Byte lines
/// - minimal address arithmetic
/// - minimal predicate calculations
///
template <typename WarpCount_, typename Iterations_, int Threads,
int ElementsPerAccess, int ElementSize>
struct InterleavedOutputTileThreadMap {
using WarpCount = WarpCount_;
static int const kWarpSize = 32;
static int const kThreads = Threads;
static int const kWarpCount = kThreads / kWarpSize;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
//
// Metaprogram computation
//
struct Detail {};
//
// Output
//
using Iterations = Iterations_;
using Delta = layout::PitchLinearShape<kWarpSize * kElementsPerAccess, 1>;
/// Initial offset function
CUTLASS_HOST_DEVICE
static layout::PitchLinearCoord initial_offset(int thread_idx) {
int warp_idx = thread_idx / kWarpSize;
int lane_idx = thread_idx % kWarpSize;
// Compute warp location
layout::PitchLinearCoord warp_footprint{
Delta::kContiguous * Iterations::kContiguous,
Delta::kStrided * Iterations::kStrided};
layout::PitchLinearCoord warp_offset{warp_idx % WarpCount::kContiguous,
warp_idx / WarpCount::kContiguous};
// Compute per-lane offset
layout::PitchLinearCoord thread_offset_in_warp{
lane_idx * kElementsPerAccess, 0};
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
warp_footprint * warp_offset + thread_offset_in_warp;
return thread_offset_in_threadblock_tile;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Template metaprogram for partitioning a 4D interleaved layout across warps
/// to achieve several performance objectives:
///
/// - coalesced memory accesses in units of 64 Byte lines
/// - minimal address arithmetic
/// - minimal predicate calculations
///
template <typename WarpCount_, typename Iterations_, int Threads,
int ElementsPerAccess, int ElementSize>
struct InterleavedConvOutputTileThreadMap {
using WarpCount = WarpCount_;
static int const kWarpSize = 32;
static int const kThreads = Threads;
static int const kWarpCount = kThreads / kWarpSize;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kElementSize = ElementSize;
//
// Metaprogram computation
//
struct Detail {};
//
// Output
//
using Iterations = Iterations_;
using Delta = MatrixShape<kWarpSize / 4, 4 * kElementsPerAccess>;
/// Initial offset function
CUTLASS_HOST_DEVICE
static MatrixCoord initial_offset(int thread_idx) {
int warp_idx = thread_idx / kWarpSize;
int lane_idx = thread_idx % kWarpSize;
// Compute warp location
MatrixCoord warp_footprint{
Delta::kRow * Iterations::kRow,
Delta::kColumn * Iterations::kColumn,
};
MatrixCoord warp_offset{warp_idx % WarpCount::kRow,
warp_idx / WarpCount::kRow};
// Compute per-lane offset
MatrixCoord thread_offset_in_warp{lane_idx / 4,
(lane_idx % 4) * kElementsPerAccess};
MatrixCoord thread_offset_in_threadblock_tile =
warp_footprint * warp_offset + thread_offset_in_warp;
return thread_offset_in_threadblock_tile;
}
};
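// Illustrative example (assumed kElementsPerAccess = 8): lanes are arranged as 8 rows of
// 4 vector accesses, so lane 13 starts at thread_offset_in_warp = (13 / 4, (13 % 4) * 8) = (3, 8),
// and the per-iteration deltas are Delta = MatrixShape<32 / 4, 4 * 8> = <8, 32>.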
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
| 19,750 | C | 30.500797 | 112 | 0.666127 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
The shared memory resource is time-sliced across warps.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/epilogue_base_streamk.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerPartition = 1, ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value)
>
class Epilogue :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>,
public EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>
{
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
using BaseStreamK = EpilogueBaseStreamK<
Shape_,
PartitionsK,
WarpMmaOperator_,
AccumulatorFragmentIterator_>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// Number of warps per block
using WarpCount = typename Base::WarpCount;
/// Number of threads per block
static int const kBlockThreads = 32 * WarpCount::kCount;
/// Per-thread accumulator tile type
using AccumulatorTile = typename Base::AccumulatorTile;
/// Numerical accumulation element type
using ElementAccumulator = typename WarpMmaOperator::ElementC;
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Vector type used by the global output iterator
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Vector type used by the shared output iterator
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1, "One of these must be exactly 1.");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index in the threadblock
int thread_idx;
/// Warp index in the threadblock
int warp_idx;
public:
/// Constructor
CUTLASS_DEVICE
Epilogue(
typename Base::SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx) ///< Id of thread within warp
:
Base(shared_storage, thread_idx, warp_idx, lane_idx),
BaseStreamK(thread_idx),
shared_load_iterator_(shared_storage.reference(), thread_idx),
thread_idx(thread_idx),
warp_idx(warp_idx)
{}
/// Aggregates the accumulator sets shared by peer blocks in the global workspace,
/// performing epilogue computations, writing to output
CUTLASS_DEVICE
void reduce(
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *element_workspace,
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
OutputTileIterator source_iterator) ///< Tile iterator for the source tensor
{
// Reduce peer accumulator fragments into one fragment
AccumulatorFragment accum_fragment;
BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace);
// Store fragment to shared memory
this->warp_tile_iterator_.store(accum_fragment);
__syncthreads();
// Initialize/load source-fragment data
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
if (output_op.is_source_needed())
{
source_iterator += reduce_fragment_idx;
source_iterator.load(source_fragment);
}
// Load fragment from shared memory
typename SharedLoadIterator::Fragment aligned_accum_fragment;
shared_load_iterator_.load(aligned_accum_fragment);
// Add fragments shared by other k partitions
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
typename SharedLoadIterator::Fragment aligned_addend_fragment;
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_addend_fragment);
aligned_accum_fragment = add_fragments(aligned_accum_fragment, aligned_addend_fragment);
}
}
// Compute the output result
typename OutputTileIterator::Fragment output_fragment;
// Apply the output operator
apply_output_operator(output_fragment, output_op, aligned_accum_fragment, source_fragment);
// Store the final result
destination_iterator += reduce_fragment_idx;
destination_iterator.store(output_fragment);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator ) ///< Tile iterator for the source tensor
{
if (!output_op.is_source_needed())
{
source_iterator.clear_mask();
__syncthreads(); // Dummy (CUDA 11.0)
}
// Source-fragment data (zero-initialized for scenarios where the
// output operator allows us to skip loading it from global input)
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
// Iterator over warp-level accumulator fragment
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration)
{
//
// Convert and store fragment
//
__syncthreads();
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p)
{
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
this->warp_tile_iterator_.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
this->warp_tile_iterator_.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
this->warp_tile_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
//
// Load fragments from shared memory
//
__syncthreads();
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p)
{
// Load addend source fragment from global memory
source_iterator.load(source_fragment);
++source_iterator;
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1)
{
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
}
else if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment;
apply_output_operator(output_fragment, output_op, aligned_accum_fragment[0], source_fragment);
//
// Store the final result
//
destination_iterator.store(output_fragment);
++destination_iterator;
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
private:
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment)
{
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i)
{
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]);
}
}
};
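// Usage sketch (illustrative; `shared_storage`, the iterators and the accumulator tile are
// assumed to be constructed by the enclosing kernel):
//
//   Epilogue epilogue(shared_storage, thread_idx, warp_idx, lane_idx);
//   epilogue(output_op, destination_iterator, accumulators, source_iterator);
//
// The call stages the accumulator tile through shared memory, applies `output_op`
// (optionally blending with the source tile), and streams the result through
// `destination_iterator`.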
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 15,628 | C | 35.262181 | 128 | 0.677694 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief PredicatedTileIteratorPredicates.
PredicatedTileIteratorPredicates enables both upper and lower bounds for predicates.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator predicates used to bound computations in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_ ///< Element data type
>
class PredicatedTileIteratorPredicates {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index lower_extent_row_;
Index upper_extent_row_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// Internal state counter
int state_[3];
//
// Static asserts about internal strides
//
static_assert(sizeof(lower_extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(upper_extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorPredicates(
PredicatedTileIteratorParams const & params,
TensorCoord lower_extent,
TensorCoord upper_extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
lower_extent_row_ = lower_extent.row();
upper_extent_row_ = upper_extent.row();
thread_start_row_ = thread_offset.row();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < upper_extent.column()) &&
((thread_offset.column() + ThreadMap::Delta::kColumn * c) >= lower_extent.column());
}
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorPredicates &operator++() {
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
///< Gets lower_extent_row_
CUTLASS_DEVICE Index get_lower_extent_row() {
return lower_extent_row_;
}
///< Gets upper_extent_row_
CUTLASS_DEVICE Index get_upper_extent_row() {
return upper_extent_row_;
}
///< Gets thread_start_row_
CUTLASS_DEVICE Index get_thread_start_row() {
return thread_start_row_;
}
};
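// Usage sketch (illustrative; extents and offsets are assumed to come from the caller):
//
//   PredicatedTileIteratorPredicates<ThreadMap, Element> predicates(
//       params, lower_extent, upper_extent, thread_idx, threadblock_offset);
//   ++predicates;  // advance the row bookkeeping by one tile position
//
// Column predicates are fixed at construction from the lower/upper column extents, while
// row bounds are exposed through get_lower_extent_row(), get_upper_extent_row() and
// get_thread_start_row().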
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 9,146 | C | 28.506452 | 100 | 0.640171 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops on Volta.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/layout/permute.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueVoltaTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
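// Usage sketch (illustrative; `WarpMmaTensorOp` and the tile sizes are assumed to be
// defined by the surrounding GEMM configuration):
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombination<
//       cutlass::half_t, 8, float, float>;
//
//   using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
//       cutlass::gemm::GemmShape<128, 128, 32>,  // threadblock tile
//       WarpMmaTensorOp,                         // warp-level Volta tensor op
//       1,                                       // partitions along K
//       OutputOp,
//       8                                        // elements per access
//   >::Epilogue;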
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueVoltaTensorOpStridedDgrad {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
int Rank,
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueVoltaTensorOpAffineRankN {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN<
OutputTileThreadMap,
ElementOutput,
Rank
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,846 | C | 31.091716 | 123 | 0.705606 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_broadcast.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#include <cuda/std/utility>
#else
#include <assert.h>
#include <utility>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <
typename ElementC_,
typename ElementAccumulator_,
typename ElementCompute_,
typename ElementZ_,
typename ElementT_,
int ElementsPerAccess,
bool StoreZ = true,
bool StoreT = true
>
struct EpilogueWithBroadcastOpBase {
using ElementOutput = ElementC_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementZ = ElementZ_;
using ElementT = ElementT_;
static int const kElementsPerAccess = ElementsPerAccess;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentC = Array<ElementOutput, kElementsPerAccess>;
using FragmentZ = Array<ElementZ, kElementsPerAccess>;
using FragmentT = Array<ElementT, kElementsPerAccess>;
/// If true, the 'Z' tensor is stored
static bool const kStoreZ = StoreZ;
/// If true, the 'T' tensor is stored
static bool const kStoreT = StoreT;
/// Parameters structure - required
struct Params { };
//
// Methods
//
/// Constructor from Params
EpilogueWithBroadcastOpBase(Params const ¶ms_) { }
/// Determine if the source is needed. May return false if the source tensor is not needed by the output operator.
bool is_source_needed() const {
return true;
}
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) { }
/// Applies the operation when is_source_needed() is true
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentC const &frag_C1,
FragmentC const &frag_C2,
FragmentCompute const &V) const {
}
/// Applies the operation when is_source_needed() is false
CUTLASS_HOST_DEVICE
void operator()(
FragmentZ &frag_Z,
FragmentT &frag_T,
FragmentAccumulator const &AB,
FragmentCompute const &V) const {
}
};
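// Sketch (descriptive): a concrete output operator modeled on this concept typically
// computes something of the form
//
//   frag_Z = elementwise_op(AB + V (+ frag_C when is_source_needed()))
//   frag_T = auxiliary per-element result
//
// where V is the broadcast vector fragment; kStoreZ and kStoreT then control which of
// the Z and T tensors the epilogue actually writes back.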
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator with bias vector broadcast over columns.
///
/// Computes the following:
///
///
/// Z, T = OutputOp(AB, C, Broadcast)
///
/// if (ElementwiseOp::kStoreZ) {
/// store(converted_u);
/// }
///
/// if (ElementwiseOp::kStoreT) {
/// store(v);
/// }
///
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors (z)
typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands (t)
typename ElementVector_, ///< Pointer to broadcast vector
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator - concept is EpilogueWithBroadcastOp
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerPartition = 1, ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value),
bool IsSingleSource = OutputOp_::kIsSingleSource
>
class EpilogueWithBroadcast;
template <
typename Shape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputTileIterator_,
typename TensorTileIterator_,
typename ElementVector_,
typename AccumulatorFragmentIterator_,
typename WarpTileIterator_,
typename SharedLoadIterator_,
typename OutputOp_,
typename Padding_,
int FragmentsPerPartition,
int IterationsUnroll
>
class EpilogueWithBroadcast<
Shape_,
WarpMmaOperator_,
PartitionsK,
OutputTileIterator_,
TensorTileIterator_,
ElementVector_,
AccumulatorFragmentIterator_,
WarpTileIterator_,
SharedLoadIterator_,
OutputOp_,
Padding_,
FragmentsPerPartition,
IterationsUnroll,
false
> :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
static bool const kIsSingleSource = false;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used to store the broadcast values
using BroadcastFragment = Array<
ElementCompute,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
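  /// Number of shared memory tiles cycled through: multiple fragments per iteration when the
  /// epilogue is double-buffered, otherwise one tile per split-K partition; kSmemPointerOffset is
  /// the element offset between consecutive tiles.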
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
/// Used for the broadcast
struct BroadcastDetail {
/// Number of threads per warp
static int const kWarpSize = 32;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
    /// Number of accesses needed per row for all kThreadCount threads to cover Shape::kN columns (ceiling division, at least one)
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("BroadcastDetail {\n");
printf(
" kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
/// Shared storage structure (shadows base) with additional SMEM buffer for reduction
struct SharedStorage {
union {
BaseSharedStorage base;
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithBroadcast(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
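  /// Loads the broadcast vector once, then dispatches to compute_source_not_needed_() or
  /// compute_source_needed_() depending on output_op.is_source_needed().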
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix
OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
tensor_iterator);
}
else {
compute_source_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
source_iterator1,
source_iterator2,
tensor_iterator);
}
}
private:
CUTLASS_DEVICE
void load_broadcast_fragment_(
    BroadcastFragment & broadcast_fragment,       ///< Fragment to be filled with the broadcast vector for this thread's columns
ElementVector const * broadcast_ptr, ///< Broadcast vector
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
broadcast_fragment.clear();
// If no pointer is supplied, set with all zeros and avoid memory accesses
if (!broadcast_ptr) {
return;
}
int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();
int thread_column_idx = threadblock_offset.column() + thread_initial_column;
broadcast_ptr += thread_initial_column;
NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter;
using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;
ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
AccessType loaded;
loaded.clear();
if (thread_column_idx < problem_size.column()) {
loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr);
}
ComputeFragmentType cvt = converter(loaded);
frag_ptr[j] = cvt;
thread_column_idx += ThreadMap::Delta::kColumn;
broadcast_ptr += ThreadMap::Delta::kColumn;
}
}
template <class Seq>
struct acc2smem_source_not_needed;
template <size_t... Seq>
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
(1 - Base::kFragmentsPerIteration));
}
}
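    /// Invokes helper<Advance>() for the compile-time Advance matching the runtime position 'pos'.
    /// The pack expansion into the dummy array relies on the comma operator to run the matching
    /// helper as a side effect, avoiding a runtime switch over iterations.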
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {
(pos == (Seq * Base::kFragmentsPerIteration)) &&
(helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    TensorTileIterator tensor_iterator           ///< Threadblock tile iterator for additional tensor operand
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
// CUTLASS_PRAGMA_UNROLL
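    // Unroll the loop over output tile iterations only when IterationsUnroll is set; unrolling is
    // disabled for heavy output functors to limit binary size (see the template parameter above).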
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) {
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_not_needed<
cutlass::make_index_sequence<OutputTileIterator::kIterations /
Base::kFragmentsPerIteration>>::push(iter,
accum_fragment_iterator,
this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
}
else if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix
OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix
    TensorTileIterator tensor_iterator           ///< Threadblock tile iterator for additional tensor operand
) {
typename OutputTileIterator::Fragment source_fragment1;
source_fragment1.clear();
typename OutputTileIterator::Fragment source_fragment2;
source_fragment2.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator1.load(source_fragment1);
++source_iterator1;
source_iterator2.load(source_fragment2);
++source_iterator2;
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment1,
source_fragment2,
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
typename OutputTileIterator::Fragment const &frag_C1,
typename OutputTileIterator::Fragment const &frag_C2,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
OutputAccessType const *frag_C1_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C1);
OutputAccessType const *frag_C2_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C2);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_C1_ptr[i],
frag_C2_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
};
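/// Partial specialization for a single source operand (IsSingleSource == true). It mirrors the
/// two-source specialization above, but operator() accepts one source tile iterator whose fragment
/// is passed to the output functor.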
template <
typename Shape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputTileIterator_,
typename TensorTileIterator_,
typename ElementVector_,
typename AccumulatorFragmentIterator_,
typename WarpTileIterator_,
typename SharedLoadIterator_,
typename OutputOp_,
typename Padding_,
int FragmentsPerPartition,
int IterationsUnroll
>
class EpilogueWithBroadcast<
Shape_,
WarpMmaOperator_,
PartitionsK,
OutputTileIterator_,
TensorTileIterator_,
ElementVector_,
AccumulatorFragmentIterator_,
WarpTileIterator_,
SharedLoadIterator_,
OutputOp_,
Padding_,
FragmentsPerPartition,
IterationsUnroll,
true
> :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
static bool const kIsSingleSource = true;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used to store the broadcast values
using BroadcastFragment = Array<
ElementCompute,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
/// Used for the broadcast
struct BroadcastDetail {
/// Number of threads per warp
static int const kWarpSize = 32;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
    /// Number of accesses needed per row for all kThreadCount threads to cover Shape::kN columns (ceiling division, at least one)
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("BroadcastDetail {\n");
printf(
" kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
/// Shared storage structure (shadows base) with additional SMEM buffer for reduction
struct SharedStorage {
union {
BaseSharedStorage base;
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithBroadcast(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector const * broadcast_ptr, ///< Broadcast vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
BroadcastFragment broadcast_fragment;
load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
tensor_iterator);
}
else {
compute_source_needed_(
output_op,
broadcast_fragment,
destination_iterator,
accumulators,
source_iterator,
tensor_iterator);
}
}
private:
CUTLASS_DEVICE
void load_broadcast_fragment_(
    BroadcastFragment & broadcast_fragment,       ///< Fragment to be filled with the broadcast vector for this thread's columns
ElementVector const * broadcast_ptr, ///< Broadcast vector
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
broadcast_fragment.clear();
// If no pointer is supplied, set with all zeros and avoid memory accesses
if (!broadcast_ptr) {
return;
}
int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();
int thread_column_idx = threadblock_offset.column() + thread_initial_column;
broadcast_ptr += thread_initial_column;
NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter;
using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;
ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
AccessType loaded;
loaded.clear();
if (thread_column_idx < problem_size.column()) {
loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr);
}
ComputeFragmentType cvt = converter(loaded);
frag_ptr[j] = cvt;
thread_column_idx += ThreadMap::Delta::kColumn;
broadcast_ptr += ThreadMap::Delta::kColumn;
}
}
template <class Seq>
struct acc2smem_source_not_needed;
template <size_t... Seq>
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
(1 - Base::kFragmentsPerIteration));
}
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {
(pos == (Seq * Base::kFragmentsPerIteration)) &&
(helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    TensorTileIterator tensor_iterator           ///< Threadblock tile iterator for additional tensor operand
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
// CUTLASS_PRAGMA_UNROLL
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) {
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_not_needed<
cutlass::make_index_sequence<OutputTileIterator::kIterations /
Base::kFragmentsPerIteration>>::push(iter,
accum_fragment_iterator,
this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
}
else if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_source_not_needed_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
      int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
      CUTLASS_UNUSED(dummy[0]);
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
    TensorTileIterator tensor_iterator           ///< Threadblock tile iterator for additional tensor operand
) {
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator.load(source_fragment);
++source_iterator;
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Apply output operation
//
typename OutputTileIterator::Fragment frag_Z;
typename TensorTileIterator::Fragment frag_T;
apply_output_operator_(
frag_Z,
frag_T,
output_op,
aligned_accum_fragment[0],
source_fragment,
broadcast_fragment);
//
// Conditionally store fragments
//
if (OutputOp::kStoreZ) {
destination_iterator.store(frag_Z);
++destination_iterator;
}
if (OutputOp::kStoreT) {
tensor_iterator.store(frag_T);
++tensor_iterator;
}
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
typename OutputTileIterator::Fragment const &frag_C,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
OutputAccessType const *frag_C_ptr =
reinterpret_cast<OutputAccessType const *>(&frag_C);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_C_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
typename OutputTileIterator::Fragment &frag_Z,
typename TensorTileIterator::Fragment &frag_T,
OutputOp const &output_op,
typename SharedLoadIterator::Fragment const &frag_AB,
BroadcastFragment const &frag_Broadcast) {
using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>;
using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>;
using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;
AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z);
AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T);
AccumulatorAccessType const *frag_AB_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&frag_AB);
AccessTypeBroadcast const *frag_Broadcast_ptr =
reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
output_op(
frag_Z_ptr[i],
frag_T_ptr[i],
frag_AB_ptr[i],
frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 52,430 | C | 33.02401 | 128 | 0.666183 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_visitor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Generic epilogue for implementing certain kinds of fused epilogue behavior.
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////////////////////////
class EpilogueFusedVisitorConcept {
public:
static int const kIterations = 1;
static int const kElementsPerAccess = 4;
using ElementOutput = float;
using ElementAccumulator = float;
using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>;
/// Arguments structure
struct Arguments { };
/// Params structure
struct Params {
Params() { }
Params(Arguments const &args) { }
};
/// Shared storage
struct SharedStorage { };
public:
CUTLASS_DEVICE
EpilogueFusedVisitorConcept(
Params const ¶ms, ///< Parameters routed to the epilogue
SharedStorage &shared_storage, ///< Shared storage needed by the functors here
MatrixCoord const &problem_size, ///< Problem size of the output
int thread_idx, ///< Thread index within the threadblock
int warp_idx, ///< Warp index within the threadblock
int lane_idx, ///< Lane index within the warp
    MatrixCoord const &threadblock_offset = MatrixCoord(0, 0)) {  ///< Threadblock's initial offset within the problem size space
}
/// Helper to indicate split-K behavior
CUTLASS_DEVICE
void set_k_partition(
int split_k_index, ///< Index of this threadblock within split-K partitioned scheme
int split_k_slices) { ///< Total number of split-K slices
}
/// Called to set the batch index
CUTLASS_DEVICE
void set_batch_index(int batch_idx) {
}
/// Called at the start of the epilogue just before iterating over accumulator slices
CUTLASS_DEVICE
void begin_epilogue() {
}
/// Called at the start of one step before starting accumulator exchange
CUTLASS_DEVICE
void begin_step(int step_idx) {
}
/// Called at the start of a row
CUTLASS_DEVICE
void begin_row(int row_idx) {
}
/// Called after accumulators have been exchanged for each accumulator vector
CUTLASS_DEVICE
void visit(
int iter_idx,
int row_idx,
int column_idx,
int frag_idx,
AccumulatorFragment const &accum) {
}
/// Called at the end of a row
CUTLASS_DEVICE
void end_row(int row_idx) {
}
/// Called after all accumulator elements have been visited
CUTLASS_DEVICE
void end_step(int step_idx) {
}
/// Called after all steps have been completed
CUTLASS_DEVICE
void end_epilogue() {
}
};
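//
// Simplified call sequence (an illustrative sketch, not library code) that EpilogueWithVisitor
// below makes on a visitor satisfying this concept; begin_row()/end_row() are issued once per row
// of each accumulator step:
//
//   visitor.begin_epilogue();
//   for (int iter_idx = 0; iter_idx < Visitor::kIterations; ++iter_idx) {
//     visitor.begin_step(iter_idx);
//     // ... accumulators are exchanged through shared memory ...
//     visitor.begin_row(row_idx);
//     visitor.visit(iter_idx, row_idx, col_idx, frag_idx, accum_fragment);
//     visitor.end_row(row_idx);
//     visitor.end_step(iter_idx);
//   }
//   visitor.end_epilogue();
//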
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename Visitor_, ///< Functor containing fused operations (satisfies EpilogueFusedVisitorConcept)
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
  int FragmentsPerPartition = 1,              ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(true || !IsEpilogueFunctorHeavy<Visitor_>::value)
>
class EpilogueWithVisitor :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Visitor = Visitor_;
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = Visitor::kElementsPerAccess;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Array type used by output functor
using AccumulatorAccessType = Array<
typename WarpTileIterator::Element, kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;
using SharedStorage = typename Base::SharedStorage;
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithVisitor(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.reference(), thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
Visitor & visitor,
    AccumulatorTile const &accumulators) {       ///< Complete warp-level accumulator tile
visitor.begin_epilogue();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? Visitor::kIterations : 1)
for (int iter_idx = 0; iter_idx < Visitor::kIterations; ++iter_idx) {
//
// Load the source
//
visitor.begin_step(iter_idx);
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<Visitor::kIterations>>::push(
iter_idx, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Iterate over output fragments
//
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment[0]);
int const kAccumulatorFragmentCount = AccumulatorTile::kElements / (Visitor::kIterations * AccumulatorAccessType::kElements);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) {
int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn;
int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn;
// Start a new row of the output fragment
if (!col_idx) {
visitor.begin_row(row_idx);
}
visitor.visit(
iter_idx,
row_idx,
col_idx,
idx,
accum_frag_ptr[idx]
);
// End the row of the output fragment
if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) {
visitor.end_row(row_idx);
}
}
//
// Conclude the step
//
visitor.end_step(iter_idx);
}
visitor.end_epilogue();
}
private:
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
      int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
      CUTLASS_UNUSED(dummy[0]);
}
};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to create an EpilogueWithVisitor from an existing epilogue
template <typename Visitor_, typename Existing_, bool IterationsUnroll = true>
struct EpilogueWithVisitorFromExistingEpilogue {
using Epilogue = EpilogueWithVisitor<
Visitor_,
typename Existing_::Shape,
typename Existing_::WarpMmaOperator,
Existing_::kPartitionsK,
typename Existing_::AccumulatorFragmentIterator,
typename Existing_::WarpTileIterator,
typename Existing_::SharedLoadIterator,
typename Existing_::Padding,
Existing_::kFragmentsPerIteration,
IterationsUnroll
>;
};
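// Usage sketch (hypothetical type names `MyVisitor` and `ExistingEpilogue`, shown only for
// illustration):
//
//   using FusedEpilogue = typename cutlass::epilogue::threadblock::
//       EpilogueWithVisitorFromExistingEpilogue<MyVisitor, ExistingEpilogue>::Epilogue;
//
// where `MyVisitor` models EpilogueFusedVisitorConcept and `ExistingEpilogue` is an existing
// threadblock epilogue that provides the nested types referenced above.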
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,454 | C | 31.817073 | 131 | 0.62539 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_depthwise.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Epilogue for depthwise convolution
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator for depthwise convolution
template <typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
          typename ThreadOutputShape_,       ///< Shape of the output tile computed by each thread (concept: TensorNHWC)
          typename ThreadBlockOutputShape_,  ///< Shape of the output tile computed by the threadblock (concept: TensorNHWC)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept:
///< gemm::warp::MmaTensorOp)
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename Padding_ ///< Padding added to SMEM allocation to avoid bank conflicts (concept:
///< MatrixShape)
>
class EpilogueDepthwise {
public:
using Shape = Shape_;
using WarpShape = typename WarpMmaOperator_::Shape;
using ThreadOutputShape = ThreadOutputShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using WarpMmaOperator = WarpMmaOperator_;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType =
Array<typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType =
Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount =
gemm::GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN>;
public:
static_assert(SharedLoadIterator::Fragment::kElements ==
OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess,
"OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
/// Shared storage allocation needed by the epilogue
struct SharedStorage {
//
// Type definitions
//
/// Element type of shared memory
using Element = typename WarpTileIterator::Element;
/// Tensor reference to shared memory allocation
using TensorRef = typename WarpTileIterator::TensorRef;
/// Layout of shared memory allocation
using Layout = typename WarpTileIterator::Layout;
/// Logical shape of the shared memory tile written to by all warps.
using Shape = MatrixShape<ThreadBlockOutputShape::kNHW, ThreadBlockOutputShape::kC>;
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<Shape::kRow, Shape::kColumn>;
//
// Data members
//
AlignedBuffer<Element, StorageShape::kCount> storage;
//
// Methods
//
/// Returns a pointer to the shared memory buffer
CUTLASS_DEVICE
Element *data() { return storage.data(); }
/// Returns a tensor reference to the shared memory buffer
CUTLASS_DEVICE
TensorRef reference() {
return TensorRef(storage.data(), Layout::packed({StorageShape::kRow, StorageShape::kColumn}));
}
};
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Stores a warp's fragment of accumulators to SMEM
WarpTileIterator warp_tile_iterator_;
LongIndex warp_offset;
int thread_idx;
int warp_idx;
int lane_idx;
int warp_m, warp_n; // warp coordinates within a cta
int tid_m, tid_n; // thread coordinates within a warp
public:
/// Constructor
CUTLASS_DEVICE
EpilogueDepthwise(SharedStorage &shared_storage, ///< Shared storage object
int thread_idx_, ///< ID of a thread within the threadblock
int warp_idx_, ///< ID of warp within threadblock
int lane_idx_ ///< Id of thread within warp
)
: thread_idx(thread_idx_),
warp_idx(warp_idx_),
lane_idx(lane_idx_),
shared_load_iterator_(shared_storage.reference(), thread_idx_),
warp_tile_iterator_(shared_storage.reference(), thread_idx_, lane_idx_) {}
/// Streams the result to global memory
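  /// Accumulators are staged to shared memory at smem_base_offset, the threadblock synchronizes,
  /// the data is reloaded in an output-aligned order, combined with the optional source fragment
  /// by the output operator, and then written to global memory.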
CUTLASS_DEVICE
void operator()(OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
                  OutputTileIterator source_iterator,      ///< Tile iterator for the source operand
const int smem_base_offset) { ///< SMEM base offset for epilogue operation
    // Set the SMEM base address for the current output tile.
warp_tile_iterator_.set_smem_base_address(smem_base_offset);
shared_load_iterator_.set_smem_base_address(smem_base_offset);
if (!output_op.is_source_needed()) {
compute_source_not_needed_(output_op, destination_iterator, accumulators);
} else {
compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator);
}
}
private:
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
      OutputTileIterator source_iterator) {  ///< Tile iterator for the source matrix
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
source_iterator.load(source_fragment);
// store to smem
warp_tile_iterator_.store(accumulators);
__syncthreads();
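    // The barrier ensures every warp's accumulator fragment is visible in shared
    // memory before threads load fragments aligned with the output tile layout.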
typename SharedLoadIterator::Fragment aligned_accum_fragment;
// load from smem
shared_load_iterator_.load(aligned_accum_fragment);
typename OutputTileIterator::Fragment output_fragment;
apply_output_operator_(output_fragment, output_op, aligned_accum_fragment, source_fragment);
// Store to GMEM
destination_iterator.store(output_fragment);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator, ///< Tile iterator for destination
      AccumulatorTile const &accumulators) {  ///< Complete warp-level accumulator tile
// store to smem
warp_tile_iterator_.store(accumulators);
__syncthreads();
typename SharedLoadIterator::Fragment aligned_accum_fragment;
// load from smem
shared_load_iterator_.load(aligned_accum_fragment);
typename OutputTileIterator::Fragment output_fragment;
apply_output_operator_source_not_needed_(output_fragment, output_op, aligned_accum_fragment);
// Store to GMEM
destination_iterator.store(output_fragment);
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment) {
OutputAccessType *output_frag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
typename OutputTileIterator::Fragment &output_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment) {
OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment);
AccumulatorAccessType const *compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
output_frag_ptr[i] = output_op(compute_frag_ptr[i]);
}
}
};
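// A minimal usage sketch (hypothetical; it assumes a fully specialized EpilogueDepthwise
// type and iterators that have already been constructed by the caller):
//
//   __shared__ typename Epilogue::SharedStorage epilogue_storage;
//   Epilogue epilogue(epilogue_storage, thread_idx, warp_idx, lane_idx);
//   epilogue(output_op, destination_iterator, accumulators, source_iterator, smem_base_offset);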
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,424 | C | 38.955357 | 120 | 0.674836 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_,       ///< Thread map (concept: OutputTileThreadMap)
typename Element_ ///< Element data type
>
class PredicatedTileIteratorStridedDgrad {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
/// Convolution problem size
cutlass::conv::Conv2dProblemSize problem_size;
int tiled_rows_per_filter;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout, cutlass::conv::Conv2dProblemSize problem_size_, int threadblock_row):
problem_size(problem_size_),
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
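      // tiled_rows_per_filter is the total number of tiled output rows covered by each
      // filter position (r, s); row offsets are wrapped modulo this value when remapping
      // to per-filter NPQ coordinates in the load/store paths.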
int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_row);
tiled_rows_per_filter = tile_m_per_filter * threadblock_row;
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
  /// Starting Dx h and w coordinates for the strided dgrad mapping
int start_h_, start_w_;
  /// Effective Dy P and Q dimensions for the strided dgrad mapping
int p_, q_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column position (assuming steady-state predicates have been computed)
Index thread_start_column_;
/// Internal state counter
int state_[3];
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorStridedDgrad(
Params const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
int start_r, int start_s,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
int r = start_r;
int s = start_s;
if (params_.problem_size.mode == cutlass::conv::Mode::kConvolution) {
r = (params_.problem_size.R - 1 - r);
s = (params_.problem_size.S - 1 - s);
}
// compute starting coordinates in Dx start_h_ and start_w_
strided_dgrad_starting_coords(
params_.problem_size,
stride_h_divmod, stride_w_divmod,
r, s,
start_h_, start_w_);
p_ = (params_.problem_size.H - start_h_ + params_.problem_size.stride_h - 1) / params_.problem_size.stride_h;
q_ = (params_.problem_size.W - start_w_ + params_.problem_size.stride_w - 1) / params_.problem_size.stride_w;
extent_row_ = extent.row();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < extent.column());
}
// Null pointer performs no accesses
if (!pointer) {
mask_.clear();
}
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer);
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
// remapping rows to find the mapped_row_offset
int npq_offset = (row_offset + thread_start_row_) % params_.tiled_rows_per_filter;
// (STEP 4.a) [order NHW rows to be loaded and stored in output Dx NHWxC layout]
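          // Decompose the per-filter NPQ offset into (n, p, q) and map it back to an NHW
          // row index in Dx using the starting (h, w) coordinates and the convolution strides.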
int n = npq_offset / (p_ * q_);
int residual = npq_offset % (p_ * q_);
int p = residual / q_;
int q = residual % q_;
int mapped_row_offset = n * (params_.problem_size.H * params_.problem_size.W) +
(start_h_ + p * params_.problem_size.stride_h) * params_.problem_size.W +
(start_w_ + q * params_.problem_size.stride_w);
bool row_guard = mapped_row_offset < extent_row_;
int64_t row_byte_offset = mapped_row_offset * params_.stride;
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int64_t column_byte_offset = (thread_start_column_ + column * ThreadMap::Delta::kColumn) * (sizeof_bits<Element>::value / 8);
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)(byte_pointer + row_byte_offset + column_byte_offset + byte_offset),
guard);
}
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
// remapping rows to find the mapped_row_offset
int npq_offset = (row_offset + thread_start_row_) % params_.tiled_rows_per_filter;
// (STEP 4.a) [order NHW rows to be loaded and stored in output Dx NHWxC layout]
int n = npq_offset / (p_ * q_);
int residual = npq_offset % (p_ * q_);
int p = residual / q_;
int q = residual % q_;
int mapped_row_offset = n * (params_.problem_size.H * params_.problem_size.W) +
(start_h_ + p * params_.problem_size.stride_h) * params_.problem_size.W +
(start_w_ + q * params_.problem_size.stride_w);
bool row_guard = mapped_row_offset < extent_row_;
int64_t row_byte_offset = mapped_row_offset * params_.stride;
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int64_t column_byte_offset = (thread_start_column_ + column * ThreadMap::Delta::kColumn) * (sizeof_bits<Element>::value / 8);
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_store<AccessType, sizeof(AccessType) >(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)(byte_pointer + row_byte_offset + column_byte_offset + byte_offset),
guard);
}
}
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_byte_offset(frag, 0);
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorStridedDgrad &operator++() {
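    // Advance the thread's starting row through the row -> group -> cluster hierarchy
    // of the output tile thread map.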
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 15,536 | C | 31.36875 | 137 | 0.618177 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator for planar-complex output representations.
///
/// Note, as with most CUTLASS components for planar complex, the template arguments describe
/// the underlying real data type.
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename Padding_ ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
>
class EpiloguePlanarComplex {
public:
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
/// Output layout is always row-major
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = ArrayPlanarComplex<
typename WarpMmaOperator::FragmentC::Element,
WarpMmaOperator::FragmentC::kElements
>;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Shape of each warp-level operation
using WarpShape = typename WarpMmaOperator::Shape;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Shared memory allocation
struct SharedStorage {
//
// Type definitions
//
/// Element type of shared memory
using Element = typename WarpTileIterator::Element;
/// Tensor reference to shared memory allocation
using TensorRef = typename WarpTileIterator::TensorRef;
/// Layout of shared memory allocation
using Layout = typename WarpTileIterator::Layout;
/// Logical shape of the shared memory tile written to by all warps.
using Shape = MatrixShape<
WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK,
WarpCount::kN * WarpTileIterator::Shape::kColumn
>;
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
Shape::kRow + Padding::kRow,
Shape::kColumn + Padding::kColumn
>;
static int const kImaginaryStride = StorageShape::kCount;
//
// Data members
//
AlignedBuffer<Element, kImaginaryStride * 2> storage;
//
// Methods
//
/// Returns a pointer to the shared memory buffer
CUTLASS_DEVICE
Element *data() {
return storage.data();
}
/// Returns a tensor reference to the shared memory buffer
CUTLASS_DEVICE
TensorRef reference() {
return TensorRef(
storage.data(),
Layout::packed({StorageShape::kRow, StorageShape::kColumn}));
}
};
private:
//
// Data members
//
SharedStorage &shared_storage_;
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Stores a warp's fragment of accumulators to SMEM
WarpTileIterator warp_tile_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpiloguePlanarComplex(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
shared_storage_(shared_storage),
shared_load_iterator_(shared_storage.reference(), thread_idx),
warp_tile_iterator_(shared_storage.reference(), lane_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to three coordinates:
//
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN);
int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
int warp_m = warp_mn % WarpCount::kM;
int warp_n = warp_mn / WarpCount::kM;
MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n};
warp_tile_iterator_.add_tile_offset(warp_offset);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
OutputTileIterator destination_iterator_real, ///< Tile iterator for destination
OutputTileIterator destination_iterator_imag, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator_real,        ///< Tile iterator for the real-valued source
    OutputTileIterator source_iterator_imag) {      ///< Tile iterator for the imaginary-valued source
typename OutputTileIterator::Fragment source_fragment_real;
typename OutputTileIterator::Fragment source_fragment_imag;
if (!output_op.is_source_needed()) {
source_iterator_real.clear_mask();
source_iterator_imag.clear_mask();
}
source_fragment_real.clear();
source_fragment_imag.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator_real(accumulators.real);
AccumulatorFragmentIterator accum_fragment_iterator_imag(accumulators.imag);
//
// Iterate over accumulator tile
//
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator_real.load(source_fragment_real);
source_iterator_imag.load(source_fragment_imag);
++source_iterator_real;
++source_iterator_imag;
//
// Convert and store fragment
//
__syncthreads();
typename AccumulatorFragmentIterator::Fragment accum_fragment_real;
typename AccumulatorFragmentIterator::Fragment accum_fragment_imag;
accum_fragment_iterator_real.load(accum_fragment_real);
accum_fragment_iterator_imag.load(accum_fragment_imag);
++accum_fragment_iterator_real;
++accum_fragment_iterator_imag;
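      // Real and imaginary accumulator fragments are staged in the two halves of the
      // shared memory buffer, separated by SharedStorage::kImaginaryStride elements.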
this->warp_tile_iterator_.store(accum_fragment_real);
this->warp_tile_iterator_.store_with_pointer_offset(accum_fragment_imag, SharedStorage::kImaginaryStride);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment_real[kPartitionsK];
typename SharedLoadIterator::Fragment aligned_accum_fragment_imag[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment_real[0]);
shared_load_iterator_.load_with_pointer_offset(aligned_accum_fragment_imag[0], SharedStorage::kImaginaryStride);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
static_assert(kPartitionsK == 1, "Sliced-K not supported for planar complex at this time");
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment_real;
typename OutputTileIterator::Fragment output_fragment_imag;
apply_output_operator_(
output_fragment_real,
output_fragment_imag,
output_op,
aligned_accum_fragment_real[0],
aligned_accum_fragment_imag[0],
source_fragment_real,
source_fragment_imag);
//
// Store the final result
//
destination_iterator_real.store(output_fragment_real);
destination_iterator_imag.store(output_fragment_imag);
++destination_iterator_real;
++destination_iterator_imag;
}
}
private:
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &output_fragment_real,
typename OutputTileIterator::Fragment &output_fragment_imag,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment_real,
typename SharedLoadIterator::Fragment const &aligned_accum_fragment_imag,
typename OutputTileIterator::Fragment const &source_fragment_real,
typename OutputTileIterator::Fragment const &source_fragment_imag) {
OutputAccessType *output_frag_real_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment_real);
OutputAccessType *output_frag_imag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment_imag);
AccumulatorAccessType const *compute_frag_real_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_real);
AccumulatorAccessType const *compute_frag_imag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_imag);
OutputAccessType const *source_frag_real_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment_real);
OutputAccessType const *source_frag_imag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment_imag);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
auto result_fragment = output_op(
make_ArrayPlanarComplex(compute_frag_real_ptr[i], compute_frag_imag_ptr[i]),
make_ArrayPlanarComplex(source_frag_real_ptr[i], source_frag_imag_ptr[i])
);
output_frag_real_ptr[i] = result_fragment.real;
output_frag_imag_ptr[i] = result_fragment.imag;
}
}
};
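// A minimal usage sketch (hypothetical; it assumes a fully specialized EpiloguePlanarComplex
// type and iterators that have already been constructed by the caller):
//
//   __shared__ typename Epilogue::SharedStorage epilogue_storage;
//   Epilogue epilogue(epilogue_storage, thread_idx, warp_idx, lane_idx);
//   epilogue(output_op, dest_real, dest_imag, accumulators, source_real, source_imag);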
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 14,610 | C | 35.345771 | 128 | 0.680767 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator with reduction over each column
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands
typename ElementVector_, ///< Pointer to reduction vector
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename ReductionOp_, ///< Reduction operator
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp_>::value)
>
class EpilogueWithReduction :
public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using TensorTileIterator = TensorTileIterator_;
using ElementVector = ElementVector_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using ReductionOp = ReductionOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
static bool const kIsSingleSource = true;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Compute data type produced by the output op
using ElementCompute = typename OutputOp::ElementCompute;
/// Compute fragment
using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;
/// Thread map used by output tile iterators
using ThreadMap = typename OutputTileIterator::ThreadMap;
/// Fragment object used in reduction
using ReductionFragment = Array<
ElementAccumulator,
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Data type of additional tensor
using ElementTensor = typename TensorTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;
/// Tensor access type
using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
/// Shared memory allocation from epilogue base class
using BaseSharedStorage = typename Base::SharedStorage;
/// Used for the reduction
struct ReductionDetail {
/// If true, accumulator coordinates are computed and out-of-bounds checks are enabled when
/// performing the reduction.
static bool const kOobCheck = false;
/// Number of threads per warp
static int const kWarpSize = 32;
/// Number of distinct scalar column indices handled by each thread
static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;
/// Number of distinct scalar row indices handled by each thread
static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;
/// Number of threads per threadblock
static int const kThreadCount = kWarpSize * WarpCount::kCount;
/// Number of distinct threads per row of output tile
static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);
/// Number of distinct threads which must be reduced during the final reduction phase within the threadblock.
static int const kThreadRows = kThreadCount / kThreadsPerRow;
    /// Number of accesses each thread makes so that the threadblock's threads collectively cover all Shape::kN columns during the final reduction
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
kThreadRows,
Shape::kN
>;
/// Debug printing
CUTLASS_DEVICE
static void print() {
#if 0
printf("ReductionDetail {\n");
printf(
" kElementsPerAccess:%d\nkColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
"kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
kElementsPerAccess,
kColumnsPerThread,
kRowsPerThread,
kThreadCount,
kThreadsPerRow,
kThreadRows,
kThreadAccessesPerRow,
StorageShape::kRow,
StorageShape::kColumn,
StorageShape::kCount
);
printf("};\n");
#endif
}
};
/// Shared storage structure (shadows base) with additional SMEM buffer for reduction
struct SharedStorage {
union {
BaseSharedStorage base;
AlignedArray<ElementAccumulator, ReductionDetail::StorageShape::kCount, 16> reduction; ///< Shared storage for reduction
};
CUTLASS_HOST_DEVICE
SharedStorage() { }
};
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
  /// Shared memory pointer for the reduction
ElementAccumulator *reduction_ptr_;
/// Thread index within the threadblock
int thread_idx_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWithReduction(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.base.reference(), thread_idx),
reduction_ptr_(shared_storage.reduction.data()),
thread_idx_(thread_idx)
{
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
ElementVector * reduction_output_ptr, ///< Reduction output vector
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord(Shape::kM, Shape::kN),
MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space
MatrixCoord()) {
ReductionFragment reduction_fragment;
reduction_fragment.clear();
if (!output_op.is_source_needed()) {
compute_source_not_needed_(
output_op,
reduction_fragment,
destination_iterator,
accumulators,
tensor_iterator,
problem_size,
threadblock_offset);
}
else {
compute_source_needed_(
output_op,
reduction_fragment,
destination_iterator,
accumulators,
source_iterator,
tensor_iterator,
problem_size,
threadblock_offset);
}
if (output_op.participates_in_reduction()) {
reduction_(problem_size, threadblock_offset, reduction_output_ptr, reduction_fragment);
}
}
private:
/// Perform the reduction
CUTLASS_DEVICE
void reduction_(
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
    MatrixCoord const &threadblock_offset,      ///< Threadblock's initial offset within the problem size space
ElementVector * reduction_output_ptr, ///< Reduction output vector
ReductionFragment const & reduction_fragment) {
//
// Store the partially reduced value to SMEM
//
// Guard against uses of the existing SMEM tile
__syncthreads();
using AccessType = AlignedArray<ElementAccumulator, ThreadMap::kElementsPerAccess>;
//
// Determine a compacted thread arrangement to store to SMEM.
//
int const kThreadsPerRow = Shape::kN / (ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess);
MatrixCoord thread_offset(
thread_idx_ / kThreadsPerRow,
(thread_idx_ % kThreadsPerRow) * ThreadMap::kElementsPerAccess);
//
    // Each thread stores its fragment to SMEM
//
AccessType *aligned_reduction_ptr = reinterpret_cast<AccessType *>(
&reduction_ptr_[thread_offset.row() * Shape::kN + thread_offset.column()]);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&reduction_fragment);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int col_idx = column * ThreadMap::Delta::kColumn / ThreadMap::kElementsPerAccess;
aligned_reduction_ptr[col_idx] = frag_ptr[column];
}
__syncthreads();
//
// Now, threads are assigned several columns of the output. They fetch over all rows from
// the compacted SMEM tile and perform a reduction.
//
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < ReductionDetail::kThreadAccessesPerRow; ++j) {
int column_idx = thread_idx_ + j * ReductionDetail::kThreadCount;
ReductionOp reduction_op;
ElementAccumulator reduction_element = ElementAccumulator();
int output_column_idx = threadblock_offset.column() + column_idx;
if (column_idx < Shape::kN && output_column_idx < problem_size.column()) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kThreadRows; ++row) {
if (row) {
auto frag = reduction_ptr_[row * Shape::kN + column_idx];
reduction_element = reduction_op(reduction_element, frag);
}
else {
reduction_element = reduction_ptr_[column_idx];
}
}
// Store
reduction_output_ptr[column_idx] = ElementVector(reduction_element);
}
}
}
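  // acc2smem advances the accumulator fragment iterator by a compile-time constant before
  // storing the selected fragment to SMEM; the index_sequence pack expansion dispatches the
  // runtime iteration index to the matching compile-time advance so that the iterator
  // updates can be fully unrolled.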
template<class Seq>
struct acc2smem;
template <size_t... Seq>
struct acc2smem<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const &output_op, ///< Output operator
ReductionFragment &reduction_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    TensorTileIterator tensor_iterator,          ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
//
// Iterator over warp-level accumulator fragment
//
typename TensorTileIterator::Fragment tensor_fragment;
tensor_fragment.clear();
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Convert and store fragment
//
tensor_iterator.load(tensor_fragment);
++tensor_iterator;
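      // The barrier before the SMEM store guards against overwriting fragments that other
      // threads may still be reading from the previous iteration; the barrier after the
      // store makes the freshly written accumulators visible before they are reloaded.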
__syncthreads();
acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
//
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
//
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
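        // Accumulator tiles from the K partitions are stacked along the row dimension of the
        // SMEM allocation; visit each partition's tile and sum its fragments into partition 0.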
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Compute the output result
//
FragmentCompute compute_fragment;
apply_output_operator_source_not_needed_(
reduction_fragment,
compute_fragment,
output_op,
aligned_accum_fragment[0],
tensor_fragment,
destination_iterator);
//
// Store the final result
//
NumericArrayConverter<ElementOutput, ElementCompute, FragmentCompute::kElements> converter;
typename OutputTileIterator::Fragment output_fragment = converter(compute_fragment);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const &output_op, ///< Output operator
ReductionFragment &reduction_fragment, ///< Fragment containing the accumulated partial reduction over columns
OutputTileIterator destination_iterator, ///< Tile iterator for destination
AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator,          ///< Tile iterator for the source matrix
    TensorTileIterator tensor_iterator,          ///< Threadblock tile iterator for additional tensor operand
MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses
MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space
) {
typename OutputTileIterator::Fragment source_fragment;
source_fragment.clear();
typename TensorTileIterator::Fragment tensor_fragment;
tensor_fragment.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_fragment.clear();
source_iterator.load(source_fragment);
++source_iterator;
tensor_iterator.load(tensor_fragment);
++tensor_iterator;
//
// Convert and store fragment
//
__syncthreads();
acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
if (kPartitionsK > 1)
{
plus <typename SharedLoadIterator::Fragment> add_fragments;
const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_tile_offset({tile_row_offset , 0});
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0});
}
//
// Compute the output result
//
FragmentCompute compute_fragment;
apply_output_operator_(
reduction_fragment,
compute_fragment,
output_op,
aligned_accum_fragment[0],
source_fragment,
tensor_fragment,
destination_iterator);
//
// Convert and store the final result
//
NumericArrayConverter<ElementOutput, ElementCompute, FragmentCompute::kElements> converter;
typename OutputTileIterator::Fragment output_fragment = converter(compute_fragment);
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
ReductionFragment &reduction_fragment,
FragmentCompute &compute_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename OutputTileIterator::Fragment const &source_fragment,
typename TensorTileIterator::Fragment const &tensor_fragment,
OutputTileIterator const & destination_iterator) {
ComputeAccessType *compute_frag_ptr =
reinterpret_cast<ComputeAccessType *>(&compute_fragment);
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
OutputAccessType const *source_frag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment);
TensorAccessType const *tensor_frag_ptr =
reinterpret_cast<TensorAccessType const *>(&tensor_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
compute_frag_ptr[i] = output_op(accum_frag_ptr[i], source_frag_ptr[i], tensor_frag_ptr[i]);
}
//
// Partial reduction over each column
//
ReductionOp reduction_op;
typename OutputTileIterator::Mask mask;
destination_iterator.get_mask(mask);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ReductionDetail::kColumnsPerThread; ++column) {
int column_vector_idx = column / ThreadMap::kElementsPerAccess;
bool column_guard = mask.predicates[column_vector_idx];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kRowsPerThread; ++row) {
bool fetch;
if (ReductionDetail::kOobCheck) {
int row_idx = (row % ThreadMap::Iterations::kRow);
int residual = (row / ThreadMap::Iterations::kRow);
int group_idx = (residual % ThreadMap::Iterations::kGroup);
residual = (residual / ThreadMap::Iterations::kGroup);
int cluster_idx = (residual % ThreadMap::Iterations::kCluster);
int row_offset = row_idx * ThreadMap::Delta::kRow
+ group_idx * ThreadMap::Delta::kGroup
+ cluster_idx * ThreadMap::Delta::kCluster;
int output_row = destination_iterator.thread_start_row() + row_offset;
fetch = (output_row < destination_iterator.extent_row() && column_guard);
}
else {
fetch = true;
}
ElementCompute value = ElementCompute();
if (fetch) {
value = compute_fragment[row * ReductionDetail::kColumnsPerThread + column];
}
reduction_fragment[column] = reduction_op(
reduction_fragment[column],
value);
}
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
ReductionFragment &reduction_fragment,
FragmentCompute &compute_fragment,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment,
typename TensorTileIterator::Fragment const &tensor_fragment,
OutputTileIterator const & destination_iterator
) {
ComputeAccessType *compute_frag_ptr =
reinterpret_cast<ComputeAccessType *>(&compute_fragment);
AccumulatorAccessType const *accum_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment);
TensorAccessType const *tensor_frag_ptr =
reinterpret_cast<TensorAccessType const *>(&tensor_fragment);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
compute_frag_ptr[i] = output_op(accum_frag_ptr[i], tensor_frag_ptr[i]);
}
//
// Partial reduction over each column
//
ReductionOp reduction_op;
typename OutputTileIterator::Mask mask;
destination_iterator.get_mask(mask);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ReductionDetail::kColumnsPerThread; ++column) {
int column_vector_idx = column / ThreadMap::kElementsPerAccess;
bool column_guard = mask.predicates[column_vector_idx];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ReductionDetail::kRowsPerThread; ++row) {
bool fetch;
if (ReductionDetail::kOobCheck) {
int row_idx = (row % ThreadMap::Iterations::kRow);
int residual = (row / ThreadMap::Iterations::kRow);
int group_idx = (residual % ThreadMap::Iterations::kGroup);
residual = (residual / ThreadMap::Iterations::kGroup);
int cluster_idx = (residual % ThreadMap::Iterations::kCluster);
int row_offset = row_idx * ThreadMap::Delta::kRow
+ group_idx * ThreadMap::Delta::kGroup
+ cluster_idx * ThreadMap::Delta::kCluster;
int output_row = destination_iterator.thread_start_row() + row_offset;
fetch = (output_row < destination_iterator.extent_row() && column_guard);
}
else {
fetch = true;
}
ElementCompute value = ElementCompute();
if (fetch) {
value = compute_fragment[row * ReductionDetail::kColumnsPerThread + column];
}
reduction_fragment[column] = reduction_op(
reduction_fragment[column],
value);
}
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 29,199 | C | 34.436893 | 129 | 0.661666 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_relu0.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_hardswish.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/interleaved_epilogue.h"
#include "cutlass/layout/permute.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename ElementOutput,
typename ElementAccumulator,
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
ElementAccumulator,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
ElementAccumulator
>;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float <= float x 4
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<float, float, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
static int const kFragmentsPerIteration = 2;
};
/// Partial specialization for int32_t <= int32_t x 4
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<int32_t, int32_t, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float <= int32_t x 4
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<float, int32_t, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for half <= float x 8 epilogues avoids shared memory bank conflicts.
template <
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
half_t,
float,
8,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
16,
8,
8
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
16,
8,
8
>;
static int const kFragmentsPerIteration = 2;
};
/// Partial specialization for int8/int4b_t <= int32 x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
typename ElementOutput,
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
ElementOutput,
int32_t,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
static_assert(platform::is_same<ElementOutput, cutlass::int4b_t>::value ||
platform::is_same<ElementOutput, cutlass::uint4b_t>::value ||
platform::is_same<ElementOutput, int8_t>::value ||
platform::is_same<ElementOutput, uint8_t>::value,
"ElementOutput needs to be 4 or 8 bit (unsigned) int.");
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
int32_t,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
int32_t,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
int32_t,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
int32_t
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float_e4m3_t <= float x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
cutlass::float_e4m3_t,
float,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using ElementOutput = cutlass::float_e4m3_t;
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
/// Partial specialization for float_e5m2_t <= float x 16/8 epilogues avoids shared memory bank conflicts.
/// Threadblock::kN = 256 still has bank conflicts.
template <
int ElementsPerAccess,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename ThreadMap
>
struct DefaultIteratorsTensorOp<
cutlass::float_e5m2_t,
float,
ElementsPerAccess,
ThreadblockShape,
WarpShape,
InstructionShape,
ThreadMap> {
using ElementOutput = cutlass::float_e5m2_t;
static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8),
"ElementsPerAccess needs to be 16 or 8.");
using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed<
WarpShape,
InstructionShape,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
float,
layout::RowMajor
>;
using WarpTileIterator = typename platform::conditional<
(ThreadblockShape::kN == 256),
WarpTileIteratorNotMixed,
WarpTileIteratorMixed>::type;
using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed<
ThreadMap,
float,
32,
cutlass::sizeof_bits<ElementOutput>::value,
ElementsPerAccess,
8
>;
using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator<
ThreadMap,
float
>;
using SharedLoadIterator = typename platform::conditional<
(ThreadblockShape::kN == 256),
SharedLoadIteratorNotMixed,
SharedLoadIteratorMixed>::type;
static int const kFragmentsPerIteration = 1;
};
} // namespace detail
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
static bool const UseCUDAStore = platform::is_same<ElementOutput, double>::value;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout,
UseCUDAStore
>;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
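// Illustrative sketch (not part of the library): one plausible instantiation of
// DefaultEpilogueTensorOp. The threadblock shape, element types and the output
// functor below are arbitrary assumptions, and `WarpMma` stands in for the
// warp-level Tensor Op MMA type normally produced by the GEMM's default MMA
// configuration.
//
//   using OutputOp = cutlass::epilogue::thread::LinearCombination<
//       cutlass::half_t,                         // output element
//       8,                                       // elements per access
//       float,                                   // accumulator element
//       float>;                                  // compute element
//
//   using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
//       cutlass::gemm::GemmShape<128, 128, 32>,  // threadblock tile
//       WarpMma,                                 // warp-level MMA (placeholder)
//       1,                                       // partitions along K
//       OutputOp,
//       OutputOp::kCount>::Epilogue;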
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueTensorOpStridedDgrad {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
int Rank,
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueTensorOpAffineRankN {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN<
OutputTileThreadMap,
ElementOutput,
Rank
>;
  // Select the row-major fragment iterator, since iterator selection for the affine rank-N layout matches the row-major case.
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
layout::RowMajor>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
layout::RowMajor> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps which use an
/// interleaved output layout. For this case, shared memory is not needed.
template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK,
typename OutputOp_, int ElementsPerAccess, int InterleavedK,
bool isSplitK = false>
struct DefaultInterleavedEpilogueTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::
DefaultInterleavedThreadMapTensorOp<
Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput,
kElementsPerAccess, InterleavedK>::Type;
using OutputTileIterator =
cutlass::epilogue::threadblock::InterleavedPredicatedTileIterator<
OutputTileThreadMap, ElementOutput, InterleavedK>;
using AccumulatorFragmentIterator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue<
Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator,
AccumulatorFragmentIterator, OutputOp, InterleavedK>;
};
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps which use an
/// interleaved output layout. For this case, shared memory is not needed.
template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK,
typename OutputOp_, int ElementsPerAccess, int InterleavedK,
bool isSplitK = false>
struct DefaultInterleavedConvEpilogue {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::
DefaultInterleavedConvThreadMapTensorOp<
Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput,
kElementsPerAccess, InterleavedK>::Type;
using OutputTileIterator =
cutlass::epilogue::threadblock::InterleavedConvPredicatedTileIterator<
OutputTileThreadMap, ElementOutput, InterleavedK>;
using AccumulatorFragmentIterator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
// can reuse the gemm version here to do element selection
layout::ColumnMajorInterleaved<InterleavedK>>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue<
Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator,
AccumulatorFragmentIterator, OutputOp, InterleavedK>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 27,150 | C | 32.561187 | 112 | 0.675212 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_visitor_with_softmax.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Epilogue visitor for threadblock scoped GEMMs that performs softmax partial computations in the epilogue.
  The epilogue finds the maximum value in each row of the row-major output matrix and stores it.
  The row maxima are also used for a further threadblock scoped reduction, whose partial results are stored
  in a pre-allocated array and later combined by a full reduction.
*/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/fast_math.h"
namespace cutlass {
namespace epilogue {
namespace threadblock {
template <
typename ThreadblockShape_,
int ThreadCount,
typename OutputTileIterator_,
typename ElementAccumulator_,
typename ElementNorm_,
typename ElementSum_,
typename ElementSoftmaxCompute_,
typename ElementwiseFunctor_,
bool UseMasking_ = false
>
class EpilogueVisitorSoftmax {
public:
using ThreadblockShape = ThreadblockShape_;
static int const kThreadCount = ThreadCount;
using OutputTileIterator = OutputTileIterator_;
using ElementwiseFunctor = ElementwiseFunctor_;
static int const kIterations = OutputTileIterator::kIterations;
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
using ElementOutput = typename OutputTileIterator::Element;
using LayoutOutput = cutlass::layout::RowMajor;
using ElementAccumulator = ElementAccumulator_;
using ElementNorm = ElementNorm_;
using ElementSum = ElementSum_;
using ElementSoftmaxCompute = ElementSoftmaxCompute_;
using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>;
using SoftmaxFragment = Array<ElementSoftmaxCompute, kElementsPerAccess>;
using OutputVector = Array<ElementOutput, kElementsPerAccess>;
using TensorRefD = TensorRef<ElementOutput, LayoutOutput>;
static int const kThreadsPerRow = OutputTileIterator::ThreadMap::Detail::kAccessWidth;
static bool const kHasMultiStepsInRow = (OutputTileIterator::ThreadMap::Iterations::kColumn > 1);
static bool const kUseMasking = UseMasking_;
/// Argument structure
struct Arguments {
typename ElementwiseFunctor::Params elementwise;
int64_t batch_stride_C;
int64_t batch_stride_D;
int64_t batch_stride_Max;
int64_t batch_stride_Sum;
//
// Methods
//
Arguments():
batch_stride_C(0),
batch_stride_D(0),
batch_stride_Max(0),
batch_stride_Sum(0)
{
}
Arguments(
typename ElementwiseFunctor::Params elementwise_
):
elementwise(elementwise_),
batch_stride_C(0),
batch_stride_D(0),
batch_stride_Max(0),
batch_stride_Sum(0)
{
}
Arguments(
typename ElementwiseFunctor::Params elementwise_,
int64_t batch_stride_C_,
int64_t batch_stride_D_,
int64_t batch_stride_Max_,
int64_t batch_stride_Sum_
):
elementwise(elementwise_),
batch_stride_C(batch_stride_C_),
batch_stride_D(batch_stride_D_),
batch_stride_Max(batch_stride_Max_),
batch_stride_Sum(batch_stride_Sum_)
{
}
};
struct Params {
typename ElementwiseFunctor::Params elementwise;
int64_t batch_stride_C;
int64_t batch_stride_D;
int64_t batch_stride_Max;
int64_t batch_stride_Sum;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params()
{
}
CUTLASS_HOST_DEVICE
Params(Arguments const &args):
elementwise(args.elementwise),
batch_stride_C(args.batch_stride_C),
batch_stride_D(args.batch_stride_D),
batch_stride_Max(args.batch_stride_Max),
batch_stride_Sum(args.batch_stride_Sum)
{
}
};
/// Shared storage
struct SharedStorage {
};
private:
Params const & params_;
SharedStorage & shared_storage_;
MatrixCoord extent_;
MatrixCoord extent_real_;
ElementwiseFunctor elementwise_;
OutputTileIterator iterator_C_;
OutputTileIterator iterator_D_;
typename OutputTileIterator::Fragment fragment_C_;
typename OutputTileIterator::Fragment fragment_D_;
ElementAccumulator alpha_;
ElementAccumulator beta_;
ElementNorm *ptr_Max_;
ElementSum *ptr_Sum_;
int column_offset_;
ElementSoftmaxCompute accum_max_;
ElementSoftmaxCompute accum_sum_;
MatrixCoord thread_offset_;
float infinity_;
public:
CUTLASS_DEVICE
EpilogueVisitorSoftmax(
Params const ¶ms,
SharedStorage &shared_storage,
cutlass::MatrixCoord const &problem_size,
int thread_idx,
int warp_idx,
int lane_idx,
typename OutputTileIterator::Params params_C,
typename OutputTileIterator::Params params_D,
typename OutputTileIterator::Element *ptr_C,
typename OutputTileIterator::Element *ptr_D,
ElementNorm *ptr_Max = nullptr,
ElementSum *ptr_Sum = nullptr,
cutlass::MatrixCoord const &threadblock_offset = cutlass::MatrixCoord(0, 0),
int column_offset = 0,
cutlass::MatrixCoord const &problem_size_real = cutlass::MatrixCoord(0, 0),
float infinity = 10000.0f
):
params_(params),
shared_storage_(shared_storage),
extent_(problem_size),
elementwise_(params.elementwise),
iterator_C_(params_C, ptr_C, problem_size, thread_idx, threadblock_offset),
iterator_D_(params_D, ptr_D, problem_size, thread_idx, threadblock_offset),
ptr_Max_(ptr_Max),
ptr_Sum_(ptr_Sum),
column_offset_(column_offset),
extent_real_(problem_size_real),
infinity_(infinity)
{
alpha_ = (params.elementwise.alpha_ptr ? *params.elementwise.alpha_ptr : params.elementwise.alpha);
beta_ = (params.elementwise.beta_ptr ? *params.elementwise.beta_ptr : params.elementwise.beta);
if (beta_ == ElementAccumulator()) {
iterator_C_.clear_mask();
}
}
/// Helper to indicate split-K behavior
CUTLASS_DEVICE
void set_k_partition(
int split_k_index, ///< Index of this threadblock within split-K partitioned scheme
int split_k_slices) { ///< Total number of split-K slices
}
/// Called to set the batch index
CUTLASS_DEVICE
void set_batch_index(int batch_idx) {
iterator_C_.add_pointer_offset(batch_idx * params_.batch_stride_C);
iterator_D_.add_pointer_offset(batch_idx * params_.batch_stride_D);
}
/// Called at the start of the epilogue just before iterating over accumulator slices
CUTLASS_DEVICE
void begin_epilogue() {
}
/// Called at the start of one step before starting accumulator exchange
CUTLASS_DEVICE
void begin_step(int step_idx) {
fragment_D_.clear();
fragment_C_.clear();
if (elementwise_.kScale != cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
iterator_C_.load(fragment_C_);
++iterator_C_;
}
}
/// Called at the start of a row
CUTLASS_DEVICE
void begin_row(int row_idx) {
// Clear accumulators for max and sum when starting a whole row
clear_accum_();
}
/// Called after accumulators have been exchanged for each accumulator vector
CUTLASS_DEVICE
void visit(
int iter_idx,
int row_idx,
int column_idx,
int frag_idx,
AccumulatorFragment const &accum) {
using Mul = cutlass::multiplies<SoftmaxFragment>;
using Minus = cutlass::minus<SoftmaxFragment>;
using Exp = cutlass::fast_exp_op<SoftmaxFragment>;
Minus minus;
Exp exponential;
SoftmaxFragment result;
NumericArrayConverter<ElementSoftmaxCompute, ElementOutput, kElementsPerAccess> source_converter;
OutputVector &source_vector = reinterpret_cast<OutputVector *>(&fragment_C_)[frag_idx];
if (elementwise_.kScale == cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
result = source_converter(elementwise_(accum));
    } else {
result = source_converter(elementwise_(accum, source_vector));
}
thread_offset_ =
iterator_D_.thread_start() +
OutputTileIterator::ThreadMap::iteration_offset(frag_idx);
bool column_guard = (thread_offset_.column() < extent_.column());
if (kUseMasking) {
int elements_in_boundary = extent_real_.column() - thread_offset_.column();
elements_in_boundary = (elements_in_boundary > kElementsPerAccess) ? kElementsPerAccess : elements_in_boundary;
elementwise_padding_(result, elements_in_boundary);
}
ElementSoftmaxCompute accum_max_prev = accum_max_;
// Compute the maximum within one row
if (!column_idx) {
// This is the first fragment in a new row
if (column_guard) {
accum_max_ = maximum_accumulator_(result);
}
}
else {
// This is an additional fragment in the same row
if (column_guard) {
accum_max_ = maximum_accumulator_(result, accum_max_);
}
}
// proactively compute max in warps
accum_max_ = warp_reduce_max_(accum_max_);
ElementSoftmaxCompute updater = fast_exp(accum_max_prev - accum_max_);
SoftmaxFragment intermediate = exponential(minus(result, accum_max_));
if (kHasMultiStepsInRow) {
if (!column_idx) {
accum_sum_ = (column_guard) ? \
sum_accumulator_(intermediate) : ElementSoftmaxCompute(0);
} else {
// Algorithm in $3.1, https://arxiv.org/pdf/2205.14135v1.pdf
// S* = S* x updater + sum_row(P'), where updater = exp(M* - M_row)
accum_sum_ = (column_guard) ? \
sum_accumulator_(intermediate, accum_sum_ * updater) : accum_sum_ * updater;
}
} else {
accum_sum_ = (column_guard) ? sum_accumulator_(intermediate, accum_sum_) : ElementSoftmaxCompute(0);
}
// Convert to the output
NumericArrayConverter<ElementOutput, ElementSoftmaxCompute, kElementsPerAccess> output_converter;
OutputVector &output = reinterpret_cast<OutputVector *>(&fragment_D_)[frag_idx];
output = output_converter(result);
}
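  // Illustrative host-side sketch (not part of this class) of the running
  // max / normalizer update performed above; `row_chunk` is a hypothetical
  // container holding one row, and only <cmath> / <cfloat> are assumed:
  //
  //   float m = -FLT_MAX, s = 0.0f;          // running max and normalizer
  //   for (float x : row_chunk) {
  //     float m_new = std::fmax(m, x);
  //     s = s * std::exp(m - m_new)          // rescale the old partial sum
  //       + std::exp(x - m_new);             // add the newly visited term
  //     m = m_new;
  //   }
  //   // After the whole row: softmax(x_i) = exp(x_i - m) / s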
/// Called at the end of a row
CUTLASS_DEVICE
void end_row(int row_idx) {
using ConvertSumOutput = cutlass::NumericConverter<ElementSum, ElementSoftmaxCompute>;
using ConvertNormOutput = cutlass::NumericConverter<ElementNorm, ElementSoftmaxCompute>;
ConvertSumOutput convert_sum_output;
ConvertNormOutput convert_norm_output;
// Compute accumulate sum only in the last step
accum_sum_ = warp_reduce_sum_(accum_sum_);
bool is_first_thread_in_tile = ((threadIdx.x % kThreadsPerRow) == 0);
bool row_guard = thread_offset_.row() < extent_.row();
bool is_write_thread = row_guard && is_first_thread_in_tile;
int block_batch = blockIdx.z;
ElementNorm *curr_ptr_max = ptr_Max_ + thread_offset_.row() + column_offset_ + block_batch * params_.batch_stride_Max;
ElementSum *curr_ptr_sum = ptr_Sum_ + thread_offset_.row() + column_offset_ + block_batch * params_.batch_stride_Sum;
arch::global_store<ElementNorm, sizeof(ElementNorm)>(
convert_norm_output(accum_max_),
(void *)curr_ptr_max,
is_write_thread);
arch::global_store<ElementSum, sizeof(ElementSum)>(
convert_sum_output(accum_sum_),
(void *)curr_ptr_sum,
is_write_thread);
// Clear accumulators for max and sum when finishing a whole row
clear_accum_();
}
/// Called after all accumulator elements have been visited
CUTLASS_DEVICE
void end_step(int step_idx) {
iterator_D_.store(fragment_D_);
++iterator_D_;
}
/// Called after all steps have been completed
CUTLASS_DEVICE
void end_epilogue() {
}
private:
CUTLASS_DEVICE
void elementwise_padding_(SoftmaxFragment &result, int elements_in_boundary) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < SoftmaxFragment::kElements; ++i) {
result[i] = (i < elements_in_boundary) ? result[i] : ElementSoftmaxCompute(-infinity_);
}
}
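  // The two warp-level reductions below use an XOR "butterfly": at each step
  // a thread exchanges its value with the lane whose id differs by the current
  // stride (kThreadsPerRow / 2, ..., 2, 1), so after log2(kThreadsPerRow)
  // shuffles every thread covering the row holds the full row-wide result.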
CUTLASS_DEVICE
ElementSoftmaxCompute warp_reduce_sum_(ElementSoftmaxCompute sum_) {
int half_thread_in_row = (kThreadsPerRow >> 1);
CUTLASS_PRAGMA_UNROLL
for (int i = half_thread_in_row; i > 0; i >>= 1) {
ElementSoftmaxCompute tmp = __shfl_xor_sync(0xFFFFFFFF, sum_, i);
sum_ += tmp;
}
return sum_;
}
CUTLASS_DEVICE
ElementSoftmaxCompute warp_reduce_max_(ElementSoftmaxCompute max_) {
int half_thread_in_row = (kThreadsPerRow >> 1);
CUTLASS_PRAGMA_UNROLL
for (int i = half_thread_in_row; i > 0; i >>= 1) {
ElementSoftmaxCompute tmp = __shfl_xor_sync(0xFFFFFFFF, max_, i);
max_ = fast_max(max_, tmp);
}
return max_;
}
CUTLASS_DEVICE
void clear_accum_() {
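    // Seed the running maximum with -FLT_MAX: 0xff7fffff is its IEEE-754 bit
    // pattern (sign bit set, largest finite exponent and mantissa).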
uint32_t float_max_bits = 0xff7fffff; // -FLT_MAX
float min_float = reinterpret_cast<float const &>(float_max_bits);
accum_max_ = ElementSoftmaxCompute(min_float);
accum_sum_ = ElementSoftmaxCompute(0);
}
CUTLASS_DEVICE
ElementSoftmaxCompute sum_accumulator_(SoftmaxFragment const &accum) {
ElementSoftmaxCompute sum_ = ElementSoftmaxCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < SoftmaxFragment::kElements; ++i) {
sum_ += ElementSoftmaxCompute(accum[i]);
}
return sum_;
}
CUTLASS_DEVICE
ElementSoftmaxCompute sum_accumulator_(SoftmaxFragment const &accum, ElementSoftmaxCompute sum_) {
// ElementSoftmaxCompute sum_ = ElementSoftmaxCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < SoftmaxFragment::kElements; ++i) {
sum_ += ElementSoftmaxCompute(accum[i]);
}
return sum_;
}
CUTLASS_DEVICE
ElementSoftmaxCompute maximum_accumulator_(SoftmaxFragment const &accum) {
ElementSoftmaxCompute max_ = accum[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < SoftmaxFragment::kElements; ++i) {
max_ = fast_max(max_, ElementSoftmaxCompute(accum[i]));
}
return max_;
}
CUTLASS_DEVICE
ElementSoftmaxCompute maximum_accumulator_(SoftmaxFragment const &accum, ElementSoftmaxCompute max_) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < SoftmaxFragment::kElements; ++i) {
max_ = fast_max(max_, ElementSoftmaxCompute(accum[i]));
}
return max_;
}
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
| 16,804 | C | 31.694552 | 130 | 0.644965 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Defines the optimal thread map for Volta TensorOp accumulator layouts.
*/
#pragma once
#include "predicated_tile_iterator.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for TensorOp accumulator layouts
template <
typename ThreadblockShape,
typename WarpShape,
int PartitionsK,
typename ElementOutput,
int ElementsPerAccess,
typename ElementAccumulator
>
struct DefaultThreadMapVoltaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for TensorOp accumulator layouts
template <
typename ThreadblockShape_,
typename WarpShape_,
int PartitionsK,
typename ElementOutput_,
int ElementsPerAccess
>
struct DefaultThreadMapVoltaTensorOp<
ThreadblockShape_,
WarpShape_,
PartitionsK,
ElementOutput_,
ElementsPerAccess,
half_t> {
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
static int const kPartitionsK = PartitionsK;
using ElementOutput = ElementOutput_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementAccumulator = half_t;
//
// Definitions
//
struct Detail {
static int const kTensorOpRows = 16;
static int const kWarpSize = 32;
static int const kInterleavedTilesM = WarpShape::kM / 32;
static_assert(
!(ThreadblockShape::kM % WarpShape::kM) &&
!(ThreadblockShape::kN % WarpShape::kN), "Divisibility");
/// Number of warps
using WarpCount = gemm::GemmShape<
ThreadblockShape::kM / WarpShape::kM,
ThreadblockShape::kN / WarpShape::kN,
kPartitionsK
>;
/// Number of participating threads
static int const kThreads = WarpCount::kCount * kWarpSize;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
ThreadblockShape::kN, // column
4, // row
4, // group
WarpCount::kM, // cluster
1 // tile
>;
/// Number of iterations per subspace
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
2, // row
kInterleavedTilesM, // group
1, // cluster
WarpShape::kM / kTensorOpRows // iterations
>;
};
//
// ThreadMap
//
/// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap
using Type = OutputTileOptimalThreadMap <
typename Detail::Shape,
typename Detail::Count,
Detail::kThreads,
kElementsPerAccess,
sizeof_bits<ElementOutput>::value
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the optimal thread map for TensorOp accumulator layouts
template <
typename ThreadblockShape_,
typename WarpShape_,
int PartitionsK,
typename ElementOutput_,
int ElementsPerAccess
>
struct DefaultThreadMapVoltaTensorOp<
ThreadblockShape_,
WarpShape_,
PartitionsK,
ElementOutput_,
ElementsPerAccess,
float> {
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
static int const kPartitionsK = PartitionsK;
using ElementOutput = ElementOutput_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementAccumulator = float;
//
// Definitions
//
struct Detail {
static int const kTensorOpRows = 16;
static int const kWarpSize = 32;
static int const kInterleavedTilesM = WarpShape::kM / 32;
static_assert(
!(ThreadblockShape::kM % WarpShape::kM) &&
!(ThreadblockShape::kN % WarpShape::kN), "Divisibility");
/// Number of warps
using WarpCount = gemm::GemmShape<
ThreadblockShape::kM / WarpShape::kM,
ThreadblockShape::kN / WarpShape::kN,
kPartitionsK
>;
/// Number of participating threads
static int const kThreads = WarpCount::kCount * kWarpSize;
using Shape = cutlass::epilogue::threadblock::OutputTileShape<
ThreadblockShape::kN, // column
4, // row
4, // group
WarpCount::kM, // cluster
1 // tile
>;
/// Number of iterations per subspace
using Count = cutlass::epilogue::threadblock::OutputTileShape<
1, // column
2, // row
kInterleavedTilesM, // group
1, // cluster
WarpShape::kM / kTensorOpRows // iterations
>;
};
//
// ThreadMap
//
/// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap
using Type = OutputTileOptimalThreadMap <
typename Detail::Shape,
typename Detail::Count,
Detail::kThreads,
kElementsPerAccess,
sizeof_bits<ElementOutput>::value
>;
};
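// Illustrative sketch (not part of the library): the shapes and element types
// below are arbitrary assumptions, shown only to indicate how the thread map
// is typically obtained.
//
//   using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
//       cutlass::gemm::GemmShape<128, 128, 32>,  // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,    // warp tile
//       1,                                       // partitions along K
//       cutlass::half_t,                         // output element
//       8,                                       // elements per access
//       float>::Type;                            // accumulator element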
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,303 | C | 30.895196 | 101 | 0.596878 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_,                  ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
BlasMode BlasMode_ = BlasMode::kGemm ///< Tile Iterator for a Symmetric or Hermitian Kernel
>
class PredicatedTileIteratorBlas3 {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static BlasMode const kBlasMode = BlasMode_;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
static_assert( AccessType::kElements == 1, "BLAS3 Epilogue must use AccessType::kElements as 1");
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Fill Mode for a tile on diagonal of a symmetric kernel
cutlass::FillMode fill_mode;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// Internal state counter
int state_[3];
/// Starting address of the matrix
size_t matrix_start_addr;
static_assert((kBlasMode == BlasMode::kSymmetric || kBlasMode == BlasMode::kHermitian),
"Unsupported blas3 mode.");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorBlas3(
PredicatedTileIteratorParams const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset
, cutlass::FillMode fill_mode
):
params_(params), fill_mode(fill_mode)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
thread_start_row_ = thread_offset.row();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < extent.column());
}
    // Check the symmetric kernel fill mode (kLower or kUpper for CTAs on the diagonal, kNone for the remaining CTAs)
if ((kBlasMode == BlasMode::kSymmetric || kBlasMode == BlasMode::kHermitian) &&
fill_mode == cutlass::FillMode::kInvalid) {
arch::device_breakpoint();
}
// Starting address of the matrix
matrix_start_addr = reinterpret_cast<size_t>(pointer);
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
  /// Loads a fragment on the diagonal of a symmetric kernel from memory
CUTLASS_DEVICE
void load_symmetric_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
    bool isLowerMode = (fill_mode == cutlass::FillMode::kLower);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
// Offset of row from beginning of the matrix per thread
size_t row_start_offset = (size_t)memory_pointer - matrix_start_addr;
// Absolute row index
int row_index = int(row_start_offset/params_.stride);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
// Offset of column from beginning of row per thread
size_t col_start_offset = row_start_offset +
(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * sizeof(AccessType);
// Absolute column index
size_t col_index = (col_start_offset%params_.stride)/sizeof(AccessType);
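            // Restrict accesses to the stored triangle: for kLower a thread may
            // touch an element only if its absolute row index is >= its absolute
            // column index (on or below the diagonal); for kUpper the comparison
            // is reversed.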
guard = guard && ( (isLowerMode && row_index >= col_index) ||
(!isLowerMode && row_index <= col_index) );
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
            // For Hermitian kernels, the imaginary parts of the diagonal elements are assumed to be zero, so they are explicitly zeroed here
if (guard && kBlasMode == BlasMode::kHermitian && cutlass::is_complex<Element>::value) {
Element *scalar_ptr = reinterpret_cast<Element *>(frag_ptr);
if (row_index == col_index) {
scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] =
real(scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]);
}
}
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
if (fill_mode == cutlass::FillMode::kNone) {
load_with_byte_offset(frag, 0);
}
else {
load_symmetric_with_byte_offset(frag, 0);
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
  /// Stores a fragment lying on the diagonal of a symmetric kernel to memory, honoring the fill mode
CUTLASS_DEVICE
void store_symmetric_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
    bool isLowerMode = (fill_mode == cutlass::FillMode::kLower);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
// Offset of row from beginning of the matrix per thread
size_t row_start_offset = (size_t)memory_pointer - matrix_start_addr;
// Absolute row index
int row_index = int(row_start_offset/params_.stride);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
// Offset of column from beginning of row per thread
size_t col_start_offset = row_start_offset +
(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * sizeof(AccessType);
// Absolute column index
size_t col_index = (col_start_offset%params_.stride)/sizeof(AccessType);
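            // As in the load path, restrict the store to the stored triangle:
            // lower fill mode keeps row >= column, upper fill mode keeps row <= column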
guard = guard && ( (isLowerMode && row_index >= col_index) ||
(!isLowerMode && row_index <= col_index) );
          // For Hermitian kernels the imaginary part of each diagonal element is assumed to be zero, so it is cleared before the store
if (guard && kBlasMode == BlasMode::kHermitian && cutlass::is_complex<Element>::value) {
AccessType *frag_ptr_modify = const_cast<AccessType *>(frag_ptr);
Element *scalar_ptr = reinterpret_cast<Element *>(frag_ptr_modify);
if (row_index == col_index) {
scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] =
real(scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]);
}
}
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
if (fill_mode == cutlass::FillMode::kNone) {
store_with_byte_offset(frag, 0);
}
else {
store_symmetric_with_byte_offset(frag, 0);
}
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorBlas3 &operator++() {
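    // Step one row iteration forward; whenever the row, group, or cluster counter
    // wraps, advance the byte pointer and starting row to the next group, cluster,
    // or tile respectively.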
++state_[0];
byte_pointer_ += params_.advance_row;
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
byte_pointer_ += params_.advance_group;
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
byte_pointer_ += params_.advance_cluster;
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
byte_pointer_ += params_.advance_tile;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 21,249 | C | 32.51735 | 108 | 0.605158 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator_mixed.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops optimized for mixed-precision.
This assumes the shared memory tile is in a permuted layout which avoids bank conflicts on loading.
When the fragment is loaded into registers, it matches the row-major thread map assumed by
the predicated tile iterator writing to global memory.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load output tile from shared memory in epilogue.
///
/// Satisfies: ReadableTileIterator
///
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Accumulator data type
int ElementSizeBits_, ///< Size of accumulator in bits
int OutputSizeBits_, ///< Size of output element in bits
int ElementsPerAccess, ///< Vector length of output vector
int ContiguousLanes ///< Number of lanes in the warp writing to contiguous elements
/// in the global memory tensor
>
class SharedLoadIteratorMixed;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load output tile from shared memory in epilogue.
///
/// Satisfies: ReadableTileIterator
///
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_ ///< Accumulator data type
>
class SharedLoadIteratorMixed<ThreadMap_, Element_, 32, 16, 8, 8> {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kAlignment = ThreadMap::kElementsPerAccess * sizeof_bits<Element_>::value / 8;
static int const kThreads = ThreadMap::kThreads;
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<
Element,
ThreadMap::kElementsPerAccess,
kAlignment>;
/// Vector type used for SMEM loads
using LoadType = AlignedArray<
Element,
const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess),
const_min(16, kAlignment)
>;
static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements;
private:
//
// Data members
//
  /// Pointers into the shared memory tile, in units of LoadType
LoadType const *pointers_[kLoadsPerAccess];
/// Stride along adjacent rows in units of LoadType
int stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
SharedLoadIteratorMixed(
TensorRef ref,
int thread_idx
):
stride_((ref.stride(0) / LoadType::kElements)) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
// Initialize pointers
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] = reinterpret_cast<LoadType const *>(ref.data());
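      // Match the permuted shared memory layout: pointer i is shifted by a rotation
      // derived from the 128B segment this access starts in, so the kLoadsPerAccess
      // sub-vector loads of one access land in distinct banks.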
int col_idx = (thread_offset.column() / kElementsPerAccess) * kLoadsPerAccess;
int bank_offset = (col_idx * int(sizeof(LoadType)) / 128) % kLoadsPerAccess;
col_idx += (bank_offset + i) % kLoadsPerAccess;
pointers_[i] += thread_offset.row() * stride_ + col_idx;
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] += pointer_offset / LoadType::kElements;
}
}
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] +=
offset.row() * Shape::kRow * stride_ +
offset.column() * Shape::kColumn / LoadType::kElements;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_ptr_offset =
row * ThreadMap::Delta::kRow * stride_ +
            group * ThreadMap::Delta::kGroup * stride_ +
cluster * ThreadMap::Delta::kCluster * stride_ +
pointer_offset / LoadType::kElements;
int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kLoadsPerAccess; ++v) {
int vector_idx = (column * ThreadMap::Delta::kColumn / kElementsPerAccess * kLoadsPerAccess);
LoadType const *memory_pointer = pointers_[v] + row_ptr_offset;
frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[vector_idx];
}
}
}
}
}
}
/// Set base smem address
CUTLASS_DEVICE
void set_smem_base_address(Index address) {}
/// Loads a fragment
CUTLASS_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for int32_t x 16 => int8_t/int4b_t x 16
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
int OutputSizeBits_ ///< Size of output element in bits
>
class SharedLoadIteratorMixed<ThreadMap_, int32_t, 32, OutputSizeBits_, 16, 8> {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = int32_t;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kAlignment = 16;
static int const kThreads = ThreadMap::kThreads;
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<
Element,
16,
kAlignment>;
/// Vector type used for SMEM loads
using LoadType = AlignedArray<
Element,
4,
16
>;
static int const kLoadsPerAccess = 4;
private:
//
// Data members
//
  /// Pointers into the shared memory tile, in units of LoadType
LoadType const *pointers_[kLoadsPerAccess];
/// Stride along adjacent rows in units of LoadType
int stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
SharedLoadIteratorMixed(
TensorRef ref,
int thread_idx
):
stride_((ref.stride(0) / LoadType::kElements)) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
// Initialize pointers
LoadType const *base_ptr = reinterpret_cast<LoadType const *>(ref.data()) + thread_offset.row() * stride_;
int lane_col_idx = thread_offset.column() / 16;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
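      // Swizzled base offset (in 16B LoadType vectors) for this lane; XOR-ing with i
      // makes the four pointers of an access differ in their low two bits, so the
      // four 16B loads fall in different columns of the permuted tile.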
int lane_offset = (lane_col_idx % 2) * 4 | ((lane_col_idx / 2) * 8) | ((lane_col_idx / 2) ^ i);
pointers_[i] = base_ptr + lane_offset;
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] += pointer_offset / LoadType::kElements;
}
}
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] +=
offset.row() * Shape::kRow * stride_ +
offset.column() * Shape::kColumn / LoadType::kElements;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_ptr_offset =
row * ThreadMap::Delta::kRow * stride_ +
            group * ThreadMap::Delta::kGroup * stride_ +
cluster * ThreadMap::Delta::kCluster * stride_ +
pointer_offset / LoadType::kElements;
int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kLoadsPerAccess; ++v) {
LoadType const *memory_pointer = pointers_[v];
frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[row_ptr_offset];
}
}
}
}
}
}
/// Set base smem address
CUTLASS_DEVICE
void set_smem_base_address(Index address) {}
/// Loads a fragment
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for int32_t x 8 => int8_t/int4b_t x 8
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
int OutputSizeBits_
>
class SharedLoadIteratorMixed<ThreadMap_, int32_t, 32, OutputSizeBits_, 8, 8> {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = int32_t;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kAlignment = 8;
static int const kThreads = ThreadMap::kThreads;
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<
Element,
8,
kAlignment>;
/// Vector type used for SMEM loads
using LoadType = AlignedArray<
Element,
4,
16
>;
static int const kLoadsPerAccess = 2;
private:
//
// Data members
//
  /// Pointers into the shared memory tile, in units of LoadType
LoadType const *pointers_[kLoadsPerAccess];
/// Stride along adjacent rows in units of LoadType
int stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
SharedLoadIteratorMixed(
TensorRef ref,
int thread_idx
):
stride_((ref.stride(0) / LoadType::kElements)) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
// Initialize pointers
LoadType const *base_ptr = reinterpret_cast<LoadType const *>(ref.data()) + thread_offset.row() * stride_;
int lane_col_idx = thread_offset.column() / 8;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
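      // Same swizzle idea as the 16-wide specialization above: the XOR with i makes
      // the two pointers of an access differ in their low bit.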
int lane_offset = (lane_col_idx % 8) * 2 | ((lane_col_idx / 4) ^ i);
pointers_[i] = base_ptr + lane_offset;
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] += pointer_offset / LoadType::kElements;
}
}
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &offset) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kLoadsPerAccess; ++i) {
pointers_[i] +=
offset.row() * Shape::kRow * stride_ +
offset.column() * Shape::kColumn / LoadType::kElements;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_ptr_offset =
row * ThreadMap::Delta::kRow * stride_ +
            group * ThreadMap::Delta::kGroup * stride_ +
cluster * ThreadMap::Delta::kCluster * stride_ +
pointer_offset / LoadType::kElements;
int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kLoadsPerAccess; ++v) {
LoadType const *memory_pointer = pointers_[v];
frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[row_ptr_offset];
}
}
}
}
}
}
/// Set base smem address
CUTLASS_DEVICE
void set_smem_base_address(Index address) {}
/// Loads a fragment
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 17,683 | C | 29.177474 | 117 | 0.623084 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
/// It provides a fast path for the case Rank = 2 which does not need div/rem to
/// calculate modes.
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
int Rank
>
class PredicatedTileIteratorAffineRankN {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::AffineRankN<Rank>;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = typename Layout::TensorCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
static_assert( !(Layout::kRank % 2),
"Layout rank must be even. This assumes the first half of the modes correspond to the 'row' "
"and the second half of the modes correspond to the 'column'");
static bool const kBigEndian = false;
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Parameters structure
struct Params {
//
// Data members
//
Layout layout;
/// Stride in units of bytes along M modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_m;
/// Stride in units of bytes along N modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_n;
    /// Fast divmod objects built from the tensor extents of the M modes
FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
    /// Fast divmod objects built from the tensor extents of the N modes
FastDivmod divmod_n[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
int64_t rank2_inc_col;
int64_t rank2_inc_row;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(TensorCoord const &extent, Layout const &layout_): layout(layout_) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]);
stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]);
}
if (kBigEndian) {
// "Big Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i + 1]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]);
}
}
else {
// "Little Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]);
}
}
#if 0
//
// Debug print statements to verify extents and strides are passed correctly.
//
printf("PredicatedTileIteratorAffine::Params() entered\n");
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
printf(" extent[%d]: %d\n", i, extent[i]);
}
for (int i = 0; i < Layout::kRank; ++i) {
printf(" stride[%d]: %ld\n", i, layout_.stride()[i]);
}
printf("PredicatedTileIteratorAffine::Params() returning\n");
#endif
}
CUTLASS_HOST_DEVICE
Params(Layout const &layout_): layout(layout_) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]);
stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]);
}
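      // Rank-2 fast path: precompute the per-column and per-row byte increments so
      // the load/store loops can step through the tile without div/rem.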
rank2_inc_col = ThreadMap::Delta::kColumn * stride_n[0];
rank2_inc_row = ThreadMap::Delta::kRow * stride_m[0];
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// Extent of the matrix tile in columns
Index extent_col_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column position (assuming steady-state predicates have been computed)
Index thread_start_column_;
/// Internal state counter
int state_[3];
/// Offsets in columns, cached for performance
int64_t offset_modes_n_[ThreadMap::Iterations::kColumn];
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorAffineRankN(
Params const & params,
Element *pointer,
MatrixCoord extent,
int thread_idx,
MatrixCoord threadblock_offset = MatrixCoord(),
    int const *indices = nullptr ///< gather/scatter indices (not supported by this specialization)
):
params_(params)
{
MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
extent_col_ = extent.column();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
if (Layout::kRank > 2) {
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
//
// Compute coordinate and decompose into N modes
//
int coord_n = thread_start_column_ + c * ThreadMap::Delta::kColumn;
mask_.predicates[c] = coord_n < extent.column();
Coord<Layout::kRank / 2, Index> modes_n;
int64_t offset_modes_n = 0;
if (kBigEndian) {
modes_n = CoordinateDecomposition<Layout::kRank / 2>(coord_n, params_.divmod_n);
offset_modes_n = dot(modes_n, params_.stride_n);
}
else {
modes_n = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_n, params_.divmod_n);
offset_modes_n = dot(modes_n, params_.stride_n);
}
offset_modes_n_[c] = offset_modes_n;
}
if (!pointer) {
mask_.clear();
}
}
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer);
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) {
uint8_t const *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster;
int64_t offset_modes_m = row_begin * params_.stride_m[0];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
//
// Compute coordinate and decompose into M modes
//
int coord_m = row * ThreadMap::Delta::kRow + row_begin;
Coord<Layout::kRank / 2, Index> modes_m;
if (Layout::kRank > 2) {
if (kBigEndian) {
modes_m = CoordinateDecomposition<Layout::kRank / 2>(coord_m, params_.divmod_m);
} else {
modes_m = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_m, params_.divmod_m);
}
offset_modes_m = dot(modes_m, params_.stride_m);
}
//
// Compute the offset due to modes M
//
bool row_guard = (coord_m < extent_row_);
int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0];
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
//
// Compute coordinate and decompose into N modes
//
if (Layout::kRank > 2) {
offset_modes_n = offset_modes_n_[column];
}
//
// Compute the pointer and access
//
bool guard;
if (Layout::kRank > 2) {
guard = row_guard && mask_.predicates[column];
} else {
guard = (coord_m < extent_row_) &&
((thread_start_column_ + ThreadMap::Delta::kColumn * column) < extent_col_);
}
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset),
guard
);
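          // Rank-2 fast path: advance the column offset incrementally instead of
          // re-decomposing the coordinate.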
if (Layout::kRank == 2) {
offset_modes_n += params_.rank2_inc_col;
}
}
if (Layout::kRank == 2) {
offset_modes_m += params_.rank2_inc_row;
}
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster;
int64_t offset_modes_m = row_begin * params_.stride_m[0];
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
//
// Compute coordinate and decompose into M modes
//
int coord_m = row * ThreadMap::Delta::kRow + row_begin;
Coord<Layout::kRank / 2, Index> modes_m;
if (Layout::kRank > 2) {
if (kBigEndian) {
modes_m = CoordinateDecomposition<Layout::kRank / 2>(coord_m, params_.divmod_m);
} else {
modes_m = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_m, params_.divmod_m);
}
offset_modes_m = dot(modes_m, params_.stride_m);
}
//
// Compute the offset due to modes M
//
bool row_guard = (coord_m < extent_row_);
int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0];
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
//
// Compute coordinate and decompose into N modes
//
if (Layout::kRank > 2) {
offset_modes_n = offset_modes_n_[column];
}
//
// Compute the pointer and access
//
bool guard;
if (Layout::kRank > 2) {
guard = row_guard && mask_.predicates[column];
} else {
guard = (coord_m < extent_row_) && ((thread_start_column_ + ThreadMap::Delta::kColumn * column) < extent_col_);
}
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset),
guard);
if (Layout::kRank == 2) {
offset_modes_n += params_.rank2_inc_col;
}
}
if (Layout::kRank == 2) {
offset_modes_m += params_.rank2_inc_row;
}
}
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_byte_offset(frag, 0);
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineRankN &operator++() {
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 18,821 | C | 29.555195 | 125 | 0.591042 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename ElementAccumulator_,
typename ElementOutput_,
typename ThreadBlockShape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
bool ReduceKForA_
>
class EpilogueGemmKReduction {
public:
using ThreadBlockShape = ThreadBlockShape_;
using WarpMmaOperator = WarpMmaOperator_;
using WarpShape = typename WarpMmaOperator::Shape;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// Accumulator element
using ElementAccumulator = ElementAccumulator_;
/// Output element
using ElementOutput = ElementOutput_;
/// Output access size
static int const kElementsPerAccess = 1;
static bool const kReduceKForA = ReduceKForA_;
static int const kThreadBlockSize = kReduceKForA ? ThreadBlockShape::kM : ThreadBlockShape::kN;
static int const kWarpSize = kReduceKForA ? WarpShape::kM : WarpShape::kN;
static int const kIterations = kWarpSize / 8;
using FragmentAccumulator = Array<ElementAccumulator, kIterations>;
private:
int thread_offset_;
ElementOutput* pointer_;
int col_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueGemmKReduction(
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx, ///< Id of thread within warp
int threadblock_offset,
ElementOutput* pointer
)
{
col_ = lane_idx % 4;
thread_offset_ = threadblock_offset * kThreadBlockSize
+ warp_idx * kWarpSize
+ lane_idx / 4 + col_ * 8;
pointer_ = pointer + LongIndex(thread_offset_);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
int size,
FragmentAccumulator &gemm_k_with_reduction_accumulation,
bool LoadForSerialSplitK
) {
bool guard[kIterations / 4];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
guard[i] = ((thread_offset_ + i * 32) < size);
}
Array<ElementOutput, kIterations / 4> source;
source.clear();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
ElementOutput *source_ptr = reinterpret_cast<ElementOutput *>(&source);
cutlass::arch::global_load<ElementOutput, sizeof(ElementOutput)>(
source_ptr[i],
(void *)(pointer_ + i * 32),
guard[i] && LoadForSerialSplitK);
}
FragmentAccumulator sum = gemm_k_with_reduction_accumulation;
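    // Butterfly-reduce the partial sums across each quad of lanes (XOR offsets 1 and 2);
    // afterwards every lane of the quad holds the same quad-wide totals.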
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations; ++i) {
sum[i] += __shfl_xor_sync(0xffffffff, sum[i], 1);
sum[i] += __shfl_xor_sync(0xffffffff, sum[i], 2);
}
Array<ElementAccumulator, kIterations / 4> intermediate;
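    // From each group of four reduced values, keep the entry that belongs to this
    // lane's column slot (col_).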
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
if (col_ == 0) {
intermediate[i] = sum[0 + i * 4];
}
if (col_ == 1) {
intermediate[i] = sum[1 + i * 4];
}
if (col_ == 2) {
intermediate[i] = sum[2 + i * 4];
}
if (col_ == 3) {
intermediate[i] = sum[3 + i * 4];
}
}
NumericArrayConverter<ElementAccumulator, ElementOutput, kIterations / 4> source_converter;
Array<ElementAccumulator, kIterations / 4> converted_source = source_converter(source);
plus<Array<ElementAccumulator, kIterations / 4>> plus_source;
intermediate = plus_source(intermediate, converted_source);
NumericArrayConverter<ElementOutput, ElementAccumulator, kIterations / 4> converter;
Array<ElementOutput, kIterations / 4> result = converter(intermediate);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations / 4; ++i) {
cutlass::arch::global_store<ElementOutput, sizeof(ElementOutput)>(result[i],
(void *)(pointer_ + i * 32), guard[i]);
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 7,401 | C | 33.751174 | 107 | 0.624645 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_base_streamk.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic subset of epilogue functionality for supporting StreamK decompositions
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/block_striped.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// StreamK epilogue functionality for cross-block accumulator fragment reduction
template <
typename Shape, ///< Shape of threadblock tile (concept: GemmShape)
int PartitionsK,
typename WarpMmaOperator, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
typename AccumulatorFragmentIterator> ///< Iterator for enumerating fragments within the per-thread tile of raw accumulators
class EpilogueBaseStreamK
{
protected:
/// The per-thread tile of raw accumulators
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpMmaOperator::Shape::kM,
Shape::kN / WarpMmaOperator::Shape::kN,
PartitionsK>;
/// Number of threads per block
static int const kBlockThreads = 32 * WarpCount::kCount;
/// Numerical accumulation element type
using ElementAccumulator = typename WarpMmaOperator::ElementC;
/// Fragment type used by the accumulator tile's fragment iterator
using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment;
public:
/// Number of AccumulatorTile fragments per thread
static int const kAccumulatorFragments = AccumulatorFragmentIterator::Policy::kIterations;
protected:
/// Number of AccumulatorTile fragments per block output tile
static int const kOutputTileFragments = kBlockThreads * kAccumulatorFragments;
/// Block-striped transfer utility for sharing AccumulatorFragment
using BlockStripedT = BlockStriped<kBlockThreads, AccumulatorFragment>;
/// AccumulatorFragment stride in the shared workspace between different peer blocks (each thread block can share accumulators for up to two block output tiles)
static const int kPeerFragmentStride = kOutputTileFragments * 2;
public:
/// Workspace bytes per thread block
  static size_t const kWorkspaceBytesPerBlock = sizeof(AccumulatorFragment) * kPeerFragmentStride;
public:
/// Thread index in the threadblock
int thread_idx;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueBaseStreamK(
int thread_idx) ///< ID of a thread within the threadblock
:
thread_idx(thread_idx)
{}
/// Aggregates the accumulator sets shared by peer blocks in the global workspace
CUTLASS_DEVICE
void reduce(
AccumulatorFragment &accum_fragment, ///< [out] sum of all shared accumulator fragments for these peer partials
int peer_idx_begin,
int peer_idx_end,
int reduce_fragment_idx,
void *workspace_ptr)
{
plus<AccumulatorFragment> add_fragments;
AccumulatorFragment *fragment_workspace = reinterpret_cast<AccumulatorFragment *>(workspace_ptr);
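    // Workspace layout: each peer block owns kPeerFragmentStride fragments; the first
    // kOutputTileFragments belong to the output tile that peer started, the second
    // kOutputTileFragments to a tile it contributed to but did not start.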
int fragment_offset = (peer_idx_begin * kPeerFragmentStride) + (reduce_fragment_idx * kBlockThreads);
// Load first peer fragment
BlockStripedT::load(accum_fragment, fragment_workspace + fragment_offset, this->thread_idx);
fragment_offset += kPeerFragmentStride; // Move to next peer
fragment_offset += kOutputTileFragments; // Move to the set of fragments for this peer's "non-started" output tile
// Reduce fragments from additional peers
#pragma unroll 2
for (; fragment_offset < peer_idx_end * kPeerFragmentStride; fragment_offset += kPeerFragmentStride)
{
// Load peer fragment
AccumulatorFragment addend_fragment;
BlockStripedT::load(addend_fragment, fragment_workspace + fragment_offset, this->thread_idx);
// Add peer fragment
accum_fragment = add_fragments(accum_fragment, addend_fragment);
}
}
/// Shares the accumulator set with peers in the global workspace
CUTLASS_DEVICE
void share(
int peer_idx,
void *workspace_ptr,
AccumulatorTile const &accumulators,
bool started_tile) ///< Whether this thread block computed the first work volume for the current output tile
{
AccumulatorFragment *fragment_workspace = reinterpret_cast<AccumulatorFragment *>(workspace_ptr);
int fragment_offset = peer_idx * kPeerFragmentStride;
if (!started_tile) {
// Move to the set of fragments for the "non-started" output tile
fragment_offset += kOutputTileFragments;
}
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
// Convert raw accumulator tile to fragments and store
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < kAccumulatorFragments; ++iter)
{
// Acquire reordered accumulator fragment
AccumulatorFragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
// Store accumulator fragment
BlockStripedT::store(fragment_workspace + fragment_offset, accum_fragment, this->thread_idx);
fragment_offset += kBlockThreads;
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 7,455 | C | 36.656565 | 162 | 0.674983 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Constructs a default epilogue for planar complex outputs.
This template reuses components for real-valued epilogues and applies them to planar complex
output matrices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/arch/arch.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue_planar_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMma_,
typename OpcodeClass_,
typename ArchTag_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex;
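//
// Illustrative sketch (not part of the original header): how this default might be
// instantiated. `WarpMmaTensorOp` and `PlanarComplexOutputOp` are assumed placeholder
// names for a concrete warp-level MMA operator and an epilogue output operator (for
// example, one built from epilogue::thread::LinearCombinationPlanarComplex); the tile
// shape, PartitionsK, and access width are likewise only examples.
//
// using DefaultEpilogue = cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex<
//     cutlass::gemm::GemmShape<128, 128, 32>, // threadblock tile
//     WarpMmaTensorOp,                        // warp-level MMA operator (assumed)
//     cutlass::arch::OpClassTensorOp,
//     cutlass::arch::Sm80,
//     1,                                      // PartitionsK
//     PlanarComplexOutputOp,                  // output operator (assumed)
//     8>;                                     // ElementsPerAccess
//
// using Epilogue = typename DefaultEpilogue::Epilogue;
//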
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
arch::OpClassTensorOp,
arch::Sm70,
PartitionsK,
OutputOp_,
ElementsPerAccess> {
using RealEpilogue = DefaultEpilogueVoltaTensorOp<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
OutputOp_,
ElementsPerAccess
>;
using Epilogue = EpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
typename RealEpilogue::OutputTileIterator,
typename RealEpilogue::AccumulatorFragmentIterator,
typename RealEpilogue::WarpTileIterator,
typename RealEpilogue::SharedLoadIterator,
OutputOp_,
typename RealEpilogue::Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
arch::OpClassTensorOp,
arch::Sm75,
PartitionsK,
OutputOp_,
ElementsPerAccess> {
using RealEpilogue = DefaultEpilogueTensorOp<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
OutputOp_,
ElementsPerAccess
>;
using Epilogue = EpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
typename RealEpilogue::OutputTileIterator,
typename RealEpilogue::AccumulatorFragmentIterator,
typename RealEpilogue::WarpTileIterator,
typename RealEpilogue::SharedLoadIterator,
OutputOp_,
typename RealEpilogue::Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMmaOperator_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
arch::OpClassTensorOp,
arch::Sm80,
PartitionsK,
OutputOp_,
ElementsPerAccess> {
using RealEpilogue = DefaultEpilogueTensorOp<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
OutputOp_,
ElementsPerAccess
>;
using Epilogue = EpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
typename RealEpilogue::OutputTileIterator,
typename RealEpilogue::AccumulatorFragmentIterator,
typename RealEpilogue::WarpTileIterator,
typename RealEpilogue::SharedLoadIterator,
OutputOp_,
typename RealEpilogue::Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues.
template <
typename ThreadblockShape_,
typename WarpMmaOperator_,
typename ArchTag_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
arch::OpClassSimt,
ArchTag_,
PartitionsK,
OutputOp_,
ElementsPerAccess> {
using RealEpilogue = DefaultEpilogueSimt<
ThreadblockShape_,
WarpMmaOperator_,
OutputOp_,
ElementsPerAccess
>;
using Epilogue = EpiloguePlanarComplex<
ThreadblockShape_,
WarpMmaOperator_,
PartitionsK,
typename RealEpilogue::OutputTileIterator,
typename RealEpilogue::AccumulatorFragmentIterator,
typename RealEpilogue::WarpTileIterator,
typename RealEpilogue::SharedLoadIterator,
OutputOp_,
typename RealEpilogue::Padding
>;
};
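//
// Usage sketch (illustrative only, not part of the library): the warp-level MMA,
// output operator, and tile shape below are hypothetical placeholders; in practice
// they are supplied by the planar-complex GEMM kernel's default configuration.
//
//   using Epilogue = typename DefaultEpiloguePlanarComplex<
//     ThreadblockShape_,                  // e.g. gemm::GemmShape<128, 128, 32> (assumed)
//     WarpMmaTensorOp_,                   // warp-level MMA operator (assumed)
//     arch::OpClassTensorOp,
//     arch::Sm80,
//     /*PartitionsK=*/1,
//     OutputOp_,                          // e.g. thread::LinearCombinationPlanarComplex (assumed)
//     /*ElementsPerAccess=*/8
//   >::Epilogue;
//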
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,209 | C | 28.793388 | 100 | 0.654876 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/permute.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_,       ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
bool ScatterD = false, ///< Scatter D operand or not
typename PermuteDLayout = layout::NoPermute, ///< Permute D operand or not
bool UseCUDAStore = false
>
class PredicatedTileIterator {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
using Base = PredicatedTileIteratorParams;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{ }
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
  /// Byte-level pointer. This pointer is usually used by both load() and store(), unless PermuteD is in use, in which case byte_pointer_ is used only by load().
uint8_t *byte_pointer_;
  /// Byte-level pointer for store(). When the PermuteD op is used, store_byte_pointer_ may use a different address computation than byte_pointer_.
uint8_t *store_byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
  /// Extent of the matrix tile in columns
Index extent_column_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column
Index thread_start_column_;
/// Internal state counter
int state_[3];
/// Scatter indices
int const *indices_;
/// Whether to perform Permute Op
bool PermuteD;
/// PermuteDLayout
mutable PermuteDLayout permute_layout_;
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIterator(
PredicatedTileIteratorParams const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord(),
int const *indices = nullptr
):
params_(params), indices_(indices)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
extent_column_ = extent.column();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < extent.column());
}
// Null pointer performs no accesses
if (!pointer) {
mask_.clear();
}
if (ScatterD && !indices) {
mask_.clear();
}
// Initialize byte_pointer_
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
if (ScatterD) {
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
}
    // store_byte_pointer_ is set to be the same as byte_pointer_ unless PermuteD is used.
store_byte_pointer_ = byte_pointer_;
// Initialize PermuteD. If PermuteD is true, store_byte_pointer_ is initialized accordingly.
if (platform::is_same<PermuteDLayout, layout::NoPermute>::value) {
PermuteD = false;
    } else {
PermuteD = true;
store_byte_pointer_ = reinterpret_cast<uint8_t *>(pointer);
permute_layout_ = PermuteDLayout(extent,
params_.stride * kElementsPerAccess / sizeof(AccessType));
}
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
store_byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
if (ScatterD && row_guard) {
assert(indices_);
memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset +
LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride));
}
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
if (!ScatterD) {
byte_pointer += params_.increment_row;
}
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const {
uint8_t *byte_pointer = store_byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
if (ScatterD && row_guard) {
assert(indices_);
memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset +
LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride));
}
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
int col_offset = column * ThreadMap::Delta::kColumn;
if (PermuteD) {
int col = col_offset + thread_start_column_;
int row = row_offset + thread_start_row_;
TensorCoord init_coord(row, col);
// Locate memory_pointer
memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset
+ permute_layout_(init_coord) * sizeof(AccessType) / kElementsPerAccess);
}
if (UseCUDAStore) {
if (guard) {
memory_pointer[0] =
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column];
}
} else {
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void *)&memory_pointer[0],
guard);
}
if (!PermuteD) {
memory_pointer += (ThreadMap::Delta::kColumn / kElementsPerAccess);
}
}
if (row + 1 < ThreadMap::Iterations::kRow) {
if (!ScatterD && !PermuteD) {
byte_pointer += params_.increment_row;
}
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void downsample_load_with_byte_offset(Fragment &frag, int64_t byte_offset, int convolution_P, int convolution_Q, int add_P, int add_Q, int problem_N) const {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
int output_row = row_offset + thread_start_row_;
int output_N = output_row / (convolution_P * convolution_Q);
int output_PQ = output_row % (convolution_P * convolution_Q);
int output_P = output_PQ / convolution_Q;
int output_Q = output_PQ % convolution_Q;
int input_row = output_N * 2 * convolution_P * 2 * convolution_Q +
(2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q + add_Q;
int64_t byte_offset = (input_row-output_row)*problem_N*sizeof(float);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void upsample_load_with_byte_offset(Fragment &frag, int64_t byte_offset, int convolution_P, int convolution_Q, int add_P, int add_Q, int problem_N) const {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow
+ group * ThreadMap::Delta::kGroup
+ cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
int output_row = row_offset + thread_start_row_;
int output_N = output_row / (convolution_P * convolution_Q);
int output_PQ = output_row % (convolution_P * convolution_Q);
int output_P = output_PQ / convolution_Q;
int output_Q = output_PQ % convolution_Q;
int row_add_P = add_P;
int row_add_Q = add_Q;
if (output_P > convolution_P - 2) row_add_P = 0;
if (output_Q > convolution_Q - 2) row_add_Q = 0;
int input_row = output_N * (convolution_P/2) * (convolution_Q/2) +
((output_P + row_add_P)/2) * (convolution_Q/2) + (output_Q + row_add_Q)/2;
int64_t byte_offset = (input_row-output_row)*problem_N*sizeof(float);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
column],
(void *)&memory_pointer[column * ThreadMap::Delta::kColumn /
kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
CUTLASS_DEVICE
MatrixCoord thread_start() const {
return MatrixCoord(thread_start_row_, thread_start_column_);
}
/// Need to get the thread start row from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_row() const {
return thread_start_row_;
}
  /// Need to get the thread start column from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_column() const {
return thread_start_column_;
}
/// Extent of the matrix in rows
CUTLASS_DEVICE
Index extent_row() const {
return extent_row_;
}
/// Extent of the matrix in columns
CUTLASS_DEVICE
Index extent_column() const {
return extent_column_;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator++() {
++state_[0];
if (!ScatterD && !PermuteD) {
store_byte_pointer_ += params_.advance_row;
}
if (!ScatterD) {
byte_pointer_ += params_.advance_row;
}
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
byte_pointer_ += params_.advance_group;
store_byte_pointer_ += params_.advance_group;
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
byte_pointer_ += params_.advance_cluster;
store_byte_pointer_ += params_.advance_cluster;
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
byte_pointer_ += params_.advance_tile;
store_byte_pointer_ += params_.advance_tile;
thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow
* ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile;
}
}
}
return *this;
}
/// Advances a number of positions to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIterator &operator+=(int increment)
{
// Row
state_[0] += increment;
int increment_row = state_[0] / ThreadMap::Count::kRow;
state_[0] = state_[0] % ThreadMap::Count::kRow;
byte_pointer_ += (params_.advance_row * increment);
store_byte_pointer_ += (params_.advance_row * increment);
thread_start_row_ += (ThreadMap::Shape::kRow * increment);
// Group
state_[1] += increment_row;
int increment_group = state_[1] / ThreadMap::Count::kGroup;
state_[1] = state_[1] % ThreadMap::Count::kGroup;
byte_pointer_ += (params_.advance_group * increment_row);
store_byte_pointer_ += (params_.advance_group * increment_row);
thread_start_row_ +=
(ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow *
ThreadMap::Count::kRow *
increment_row;
// Cluster
state_[2] += increment_group;
int increment_cluster = state_[2] / ThreadMap::Count::kCluster;
state_[2] = state_[2] % ThreadMap::Count::kCluster;
byte_pointer_ += (params_.advance_cluster * increment_group);
store_byte_pointer_ += (params_.advance_cluster * increment_group);
thread_start_row_ +=
ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup *
ThreadMap::Count::kRow *
ThreadMap::Shape::kRow *
increment_group;
// Tile
byte_pointer_ += (params_.advance_tile * increment_cluster);
store_byte_pointer_ += (params_.advance_tile * increment_cluster);
thread_start_row_ +=
ThreadMap::Shape::kGroup *
ThreadMap::Shape::kRow *
ThreadMap::Shape::kCluster *
ThreadMap::Shape::kTile *
increment_cluster;
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) const {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
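//
// Usage sketch (illustrative only, not part of the library): a minimal outline of
// how an epilogue typically drives this iterator. The thread map, element type,
// layout, pointer, extent, and offsets named below are hypothetical placeholders.
//
//   using Iterator = PredicatedTileIterator<OutputTileThreadMap_, float>;
//
//   typename Iterator::Params params(layout_D);        // layout::RowMajor of output D (assumed)
//   Iterator iterator_D(params, ptr_D, extent_MxN, thread_idx, threadblock_offset);
//
//   typename Iterator::Fragment frag;
//   iterator_D.load(frag);    // guarded global loads of the source operand
//   // ... apply the output operator to accumulators and frag ...
//   iterator_D.store(frag);   // guarded global stores of the result
//   ++iterator_D;             // advance to the next group of rows in the output tile
//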
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | InterleavedPredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_,       ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
int InterleavedN ///< Number of Interleaved N
>
class InterleavedPredicatedTileIterator {
public:
using ThreadMap = ThreadMap_;
using Element = Element_;
using Layout = layout::ColumnMajorInterleaved<InterleavedN>;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = layout::PitchLinearCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Iterations::kCount;
/// Fragment object
using Fragment = Array<Element, ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
/// Uses a non-template class
struct Params : InterleavedPredicatedTileIteratorParams {
using Base = InterleavedPredicatedTileIteratorParams;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
Base(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_InterleavedPredicatedTileIteratorDesc<Element, ThreadMap>()
) { }
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
/// Mask object
struct Mask {
static int const kCount = (ThreadMap::Iterations::kContiguous < 8)
? 8
: ThreadMap::Iterations::kContiguous;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in columns
Index extent_col_;
/// A thread's starting column position (assuming steady-state predicates have
/// been computed)
Index thread_start_col_;
/// Internal iteration counter
int iteration_contiguous_;
int iteration_strided_;
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
InterleavedPredicatedTileIterator(
Params const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset,
    int const *indices = nullptr     ///< gather/scatter indices; note that gather/scatter is not supported by this specialization
):
params_(params) {
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) +
TensorCoord(threadblock_offset.contiguous() * InterleavedN,
threadblock_offset.strided() / InterleavedN);
extent_col_ = extent.strided() / InterleavedN;
thread_start_col_ = thread_offset.strided();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
mask_.predicates[c] =
((thread_offset.contiguous() + ThreadMap::Delta::kContiguous * c) <
(extent.contiguous() * InterleavedN));
}
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
LongIndex(thread_offset.strided()) * LongIndex(params_.stride) +
LongIndex(thread_offset.contiguous()) * sizeof(AccessType) / kElementsPerAccess;
// Initialize internal state counter
iteration_contiguous_ = iteration_strided_ = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
uint8_t *byte_pointer = byte_pointer_;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer);
int col_offset = iteration_strided_ * ThreadMap::Delta::kStrided;
bool col_guard = ((thread_start_col_ + col_offset) < extent_col_);
bool guard = col_guard && mask_.predicates[iteration_contiguous_];
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
*frag_ptr,
(void *)memory_pointer,
guard);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
uint8_t *byte_pointer = byte_pointer_;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer);
int col_offset = iteration_strided_ * ThreadMap::Delta::kStrided;
bool col_guard = ((thread_start_col_ + col_offset) < extent_col_);
bool guard = col_guard && mask_.predicates[iteration_contiguous_];
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
*frag_ptr, (void *)memory_pointer, guard);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int iteration) {
iteration_contiguous_ = iteration % ThreadMap::Iterations::kContiguous;
iteration_strided_ = iteration / ThreadMap::Iterations::kContiguous;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIterator &operator++() {
++iteration_contiguous_;
byte_pointer_ += params_.advance_row;
if (iteration_contiguous_ == ThreadMap::Iterations::kContiguous) {
iteration_contiguous_ = 0;
++iteration_strided_;
byte_pointer_ += params_.advance_column;
if (iteration_strided_ == ThreadMap::Iterations::kStrided) {
iteration_strided_ = 0;
}
}
return *this;
}
/// Advances a number of positions to load or store
CUTLASS_HOST_DEVICE
InterleavedPredicatedTileIterator &operator+=(int increment)
{
// Contiguous
iteration_contiguous_ += increment;
int increment_strided = iteration_contiguous_ / ThreadMap::Iterations::kContiguous;
iteration_contiguous_ = iteration_contiguous_ % ThreadMap::Iterations::kContiguous;
byte_pointer_ += (params_.advance_row * increment);
// Strided
iteration_strided_ += increment_strided;
byte_pointer_ += (params_.advance_column * increment_strided);
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
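//
// Usage sketch (illustrative only): the interleaved thread map, element type, and
// interleaving factor below are hypothetical placeholders. Each load()/store()
// touches a single access, so the caller advances the iterator between iterations.
//
//   using Iterator = InterleavedPredicatedTileIterator<InterleavedThreadMap_, int8_t, 32>;
//   typename Iterator::Params params(layout_D);   // layout::ColumnMajorInterleaved<32> (assumed)
//   Iterator it(params, ptr_D, extent, thread_idx, threadblock_offset);
//   for (int i = 0; i < Iterator::kIterations; ++i) {
//     typename Iterator::Fragment frag;
//     it.load(frag);
//     it.store(frag);
//     ++it;
//   }
//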
///////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | InterleavedMaskedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_,       ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
int InterleavedN ///< Number of Interleaved N
>
class InterleavedConvPredicatedTileIterator {
public:
using ThreadMap = ThreadMap_;
using Element = Element_;
using Layout = layout::TensorNCxHWx<InterleavedN>;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = Tensor4DCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Iterations::kCount;
/// Fragment object
using Fragment = Array<Element, ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
struct Params {
//
// Data members
//
LongIndex stride_col; ///< stride in bytes between columns
LongIndex stride_row; ///< stride in bytes between rows
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(typename Layout::Stride stride_) {
stride_col = stride_[1];
stride_row = stride_[2];
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Params() {
initialize(cutlass::make_Coord(0, 0, 0));
}
CUTLASS_HOST_DEVICE
Params(Layout const &layout) {
initialize(layout.stride());
}
};
/// Mask object
struct Mask {
static int const kCount =
(ThreadMap::Iterations::kRow < 8) ? 8 : ThreadMap::Iterations::kRow;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in columns
Index extent_col_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// Extent of the matrix tile in pq
Index extent_pq_;
/// A thread's starting row position (assuming steady-state predicates have
/// been computed)
Index thread_start_row_;
/// A thread's starting column position (assuming steady-state predicates have
/// been computed)
Index thread_start_col_;
/// Internal iteration counter
LongIndex iteration_row_;
LongIndex iteration_col_;
uint32_t pq_mul_;
uint32_t pq_shr_;
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
InterleavedConvPredicatedTileIterator(
Params const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
MatrixCoord threadblock_offset
):
params_(params) {
MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_col_ = extent.c();
extent_pq_ = extent.h() * extent.w();
extent_row_ = extent.n() * extent_pq_;
find_divisor(pq_mul_, pq_shr_, extent_pq_);
thread_start_row_ = thread_offset.row();
thread_start_col_ = thread_offset.column();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int r = 0; r < ThreadMap::Iterations::kRow; ++r) {
mask_.predicates[r] =
((thread_offset.row() + ThreadMap::Delta::kRow * r) < extent_row_);
}
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) +
((thread_start_col_ / InterleavedN) * params_.stride_col +
(thread_start_col_ % InterleavedN)) *
sizeof_bits<Element>::value / 8;
// Initialize internal state counter
iteration_row_ = iteration_col_ = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
int col_offset = iteration_col_ * ThreadMap::Delta::kColumn;
bool col_guard = ((thread_start_col_ + col_offset) < extent_col_);
bool guard = col_guard && mask_.predicates[iteration_row_];
int n, pq_rem;
fast_divmod(n, pq_rem,
thread_start_row_ + iteration_row_ * ThreadMap::Delta::kRow,
extent_pq_, pq_mul_, pq_shr_);
uint8_t *byte_pointer =
byte_pointer_ + (n * params_.stride_row + pq_rem * InterleavedN) *
sizeof_bits<Element>::value / 8;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
AccessType const *memory_pointer =
reinterpret_cast<AccessType const *>(byte_pointer);
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
*frag_ptr,
(void *)memory_pointer,
guard);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
int col_offset = iteration_col_ * ThreadMap::Delta::kColumn;
bool col_guard = ((thread_start_col_ + col_offset) < extent_col_);
bool guard = col_guard && mask_.predicates[iteration_row_];
int n, pq_rem;
fast_divmod(n, pq_rem,
thread_start_row_ + iteration_row_ * ThreadMap::Delta::kRow,
extent_pq_, pq_mul_, pq_shr_);
uint8_t *byte_pointer =
byte_pointer_ + (n * params_.stride_row + pq_rem * InterleavedN) *
sizeof_bits<Element>::value / 8;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer);
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
*frag_ptr, (void *)memory_pointer, guard);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int iteration) {
iteration_row_ = iteration % ThreadMap::Iterations::kRow;
iteration_col_ = iteration / ThreadMap::Iterations::kRow;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
InterleavedConvPredicatedTileIterator &operator++() {
++iteration_row_;
if (iteration_row_ == ThreadMap::Iterations::kRow) {
iteration_row_ = 0;
++iteration_col_;
byte_pointer_ += params_.stride_col;
if (iteration_col_ == ThreadMap::Iterations::kColumn) {
iteration_col_ = 0;
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
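//
// Addressing note and usage sketch (illustrative only): the linear output row is
// decomposed as row = n * (P * Q) + pq using the precomputed fast_divmod constants,
// and each access lands at n * stride_row + pq * InterleavedN elements within the
// NCxHWx tensor; the channel offset is folded into the base pointer at construction.
// The thread map, element type, and interleaving factor below are hypothetical.
//
//   using Iterator = InterleavedConvPredicatedTileIterator<ConvThreadMap_, int8_t, 32>;
//   typename Iterator::Params params(layout_D);   // layout::TensorNCxHWx<32> (assumed)
//   Iterator it(params, ptr_D, extent_nhwc, thread_idx, threadblock_offset);
//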
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 40,870 | C | 29.23003 | 164 | 0.618106 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/permute.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
#include "cutlass/conv/conv2d_problem_size.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_,       ///< Thread map (concept: PitchLinearThreadMap)
typename Element_, ///< Element data type
typename ThreadOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>,
typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>
>
class PredicatedTileIteratorDirectConv {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using ThreadOutputShape = ThreadOutputShape_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
using ConvProblemSize = typename cutlass::conv::Conv2dProblemSize;
/// Fragment object
using Fragment = Array<Element, ThreadMap::Iterations::kCount * kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, kElementsPerAccess>;
static int const kLoadsPerAccess = AccessType::kElements / AccessType::kElements;
using ThreadTileCount = MatrixShape<
ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW
>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorDirect2dConvParams {
using Base = PredicatedTileIteratorDirect2dConvParams;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout, cutlass::conv::Conv2dProblemSize const &problem_size):
PredicatedTileIteratorDirect2dConvParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
problem_size,
{ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW}
)
{ }
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kContiguous;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorDirect2dConvParams params_;
/// Byte-level pointer
uint8_t *byte_pointer_;
  /// Pointer to the output tensor in global memory
Element *pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
  /// Extent of the matrix tile in columns
Index extent_column_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// A thread's starting column
Index thread_start_column_;
  /// Initial thread output location
int thread_start_n_, thread_start_p_, thread_start_q_;
/// Current threadblock tile index
int tile_index_;
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorDirect2dConvParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorDirectConv(
PredicatedTileIteratorDirect2dConvParams const & params,
Element *pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params), pointer_(pointer)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx);
extent_row_ = extent.row();
extent_column_ = extent.column();
// stride dim (PQ)
thread_start_row_ = thread_offset.column();
// contiguous dim (Channels)
thread_start_column_ = threadblock_offset.column() + thread_offset.row();
tile_index_ = threadblock_offset.row();
set_tile_index(0);
}
  /// Sets the threadblock tile index and recomputes the thread's starting output coordinates and predicates
CUTLASS_HOST_DEVICE
void set_tile_index(const int index) {
int residual;
params_.pq_divmod(thread_start_n_, residual, tile_index_ + index);
params_.q_divmod(thread_start_p_, thread_start_q_, residual);
// Compute the base output coord of ThreadBlock
thread_start_p_ *= ThreadBlockOutputShape::kH;
thread_start_q_ *= ThreadBlockOutputShape::kW;
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
mask_.predicates[c] = ((thread_start_column_
+ c * ThreadMap::Delta::kContiguous) < extent_column_);
}
// Null pointer performs no accesses
if (!pointer_) {
mask_.clear();
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c;
int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided;
int p = current_row / ThreadBlockOutputShape::kW;
int q = current_row % ThreadBlockOutputShape::kW;
int current_p = thread_start_p_ + p;
int current_q = thread_start_q_ + q;
bool row_guard = (current_p) < params_.P && (current_q) < params_.Q &&
(thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided;
int output_row_offset =
thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q;
uint8_t *byte_pointer =
reinterpret_cast<uint8_t *>(pointer_) +
LongIndex(output_row_offset) * LongIndex(params_.stride) +
LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) *
sizeof(AccessType) / kElementsPerAccess;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
bool guard = row_guard && mask_.predicates[c];
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard);
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c;
int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided;
int p = current_row / ThreadBlockOutputShape::kW;
int q = current_row % ThreadBlockOutputShape::kW;
int current_p = thread_start_p_ + p;
int current_q = thread_start_q_ + q;
bool row_guard = (current_p) < params_.P && (current_q) < params_.Q &&
(thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided;
int output_row_offset =
thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q;
uint8_t *byte_pointer =
reinterpret_cast<uint8_t *>(pointer_) +
LongIndex(output_row_offset) * LongIndex(params_.stride) +
LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) *
sizeof(AccessType) / kElementsPerAccess;
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset);
bool guard = row_guard && mask_.predicates[c];
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard);
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) const {
store_with_byte_offset(frag, 0);
}
CUTLASS_DEVICE
MatrixCoord thread_start() const {
return MatrixCoord(thread_start_row_, thread_start_column_);
}
/// Need to get the thread start row from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_row() const {
return thread_start_row_;
}
  /// Need to get the thread start column from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_column() const {
return thread_start_column_;
}
/// Extent of the matrix in rows
CUTLASS_DEVICE
Index extent_row() const {
return extent_row_;
}
/// Extent of the matrix in columns
CUTLASS_DEVICE
Index extent_column() const {
return extent_column_;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorDirectConv &operator++() {
// do nothing
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
  ///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) const {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
};
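//
// Usage sketch (illustrative only): the thread map, element type, and output tile
// shapes below are hypothetical placeholders. Unlike PredicatedTileIterator, this
// iterator is repositioned with set_tile_index() for each threadblock tile instead
// of being advanced with operator++.
//
//   using Iterator = PredicatedTileIteratorDirectConv<
//     ThreadMap_, float,
//     cutlass::conv::TensorNHWCShape<1, 4, 4, 8>,   // ThreadOutputShape (assumed)
//     cutlass::conv::TensorNHWCShape<1, 8, 8, 64>   // ThreadBlockOutputShape (assumed)
//   >;
//   typename Iterator::Params params(layout_D, conv2d_problem_size);
//   Iterator it(params, ptr_D, extent, thread_idx, threadblock_offset);
//   it.set_tile_index(tile_idx);   // position on the (n, p, q) tile before load()/store()
//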
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 13,872 | C | 30.105381 | 103 | 0.649438 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine_layout_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/fast_math.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Rank
>
struct PredicatedTileIteratorAffineLayoutRankNParams {
using Layout = layout::AffineRankN<Rank>;
using TensorCoord = typename Layout::TensorCoord;
static bool const kBigEndian = false;
//
// Data members
//
Layout layout;
/// Stride in units of bytes along M modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_m;
/// Stride in units of bytes along N modes
Coord<Layout::kRank/2, typename Layout::LongIndex> stride_n;
  /// Fast divmod objects for dividing by the tensor extents along the M modes
FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
  /// Fast divmod objects for dividing by the tensor extents along the N modes
FastDivmod divmod_n[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)];
int64_t rank2_inc_col;
int64_t rank2_inc_row;
//
// Methods
//
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams() { }
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams(TensorCoord const &extent,
Layout const &layout_,
int64_t element_sizeof_bits)
: layout(layout_)
{
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes(layout_.stride()[i], element_sizeof_bits);
stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2], element_sizeof_bits);
}
if (kBigEndian) {
// "Big Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i + 1]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]);
}
}
else {
// "Little Endian" scheme
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
divmod_m[i] = FastDivmod(extent[i]);
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]);
}
}
#if 0
//
// Debug print statements to verify extents and strides are passed correctly.
//
printf("PredicatedTileIteratorAffine::Params() entered\n");
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
printf(" extent[%d]: %d\n", i, extent[i]);
}
for (int i = 0; i < Layout::kRank; ++i) {
printf(" stride[%d]: %ld\n", i, layout_.stride()[i]);
}
printf("PredicatedTileIteratorAffine::Params() returning\n");
#endif
}
CUTLASS_HOST_DEVICE
PredicatedTileIteratorAffineLayoutRankNParams(Layout const &layout_,
int32_t threadmap_delta_kColumn,
int32_t threadmap_delta_kRow,
int64_t element_sizeof_bits)
: layout(layout_)
{
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank / 2; ++i) {
stride_m[i] = OffsetBytes(layout_.stride()[i], element_sizeof_bits);
stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2], element_sizeof_bits);
}
rank2_inc_col = threadmap_delta_kColumn * stride_n[0];
rank2_inc_row = threadmap_delta_kRow * stride_m[0];
}
};
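//
// Construction sketch (illustrative only): for a rank-4 affine layout, the first
// half of the modes indexes rows (M) and the second half indexes columns (N). The
// extent, strides, and element type below are hypothetical placeholders.
//
//   using Params = PredicatedTileIteratorAffineLayoutRankNParams<4>;
//   layout::AffineRankN<4> layout_D(stride_coord);   // strides for all four modes (assumed)
//   Params params(extent_coord, layout_D, sizeof_bits<float>::value);
//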
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 5,636 | C | 34.904458 | 100 | 0.584989 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_smem_accumulator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMM/CONV to store accumulator in shared memory after
applying scale, bias loaded from global memory and element-wise operations.
This Epilogue is typically used in fused GEMM/CONV to stage the intermediate accumulator.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename SmemTileIterator_, ///< Shared memory Tile iterator to output to shared memory
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename ScaleBiasIterator_, ///< Iterator to load scale and bias from global memory
typename OutputOp_ ///< Output operator
>
class EpilogueSmemAccumulator {
public:
using SmemTileIterator = SmemTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using ScaleBiasIterator = ScaleBiasIterator_;
using OutputOp = OutputOp_;
/// Fragment of accumulator tile
using FragmentAccumulator = typename AccumulatorFragmentIterator::Fragment;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Fragment of Scale and Bias loaded from global memory
using FragmentScaleBias = typename ScaleBiasIterator::Fragment;
static const bool PerChannelScale = (OutputOp::kScale ==
epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling);
/// Constructor
CUTLASS_DEVICE
EpilogueSmemAccumulator() {}
/// Streams the result to shared memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory
AccumulatorTile const &accumulator, ///< Complete warp-level accumulator tile
ScaleBiasIterator scale_iterator, ///< iterator for scale vector in global memory
ScaleBiasIterator bias_iterator) { ///< iterator for bias vector in global memory
// Fragment to load scale bias from global memory
FragmentScaleBias tb_frag_scale;
FragmentScaleBias tb_frag_bias;
/// Fragment Iterator to load slice of accumulator tile
AccumulatorFragmentIterator frag_iterator_accum(accumulator);
FragmentAccumulator tb_frag_accum;
/// Epilogue output fragment
typename SmemTileIterator::Fragment tb_frag_smem;
/// Load scale and bias from global memory
if(PerChannelScale)
scale_iterator.load(tb_frag_scale);
bias_iterator.load(tb_frag_bias);
/// Iterate over the accumulator tile and store to shared memory
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) {
CUTLASS_PRAGMA_UNROLL
for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) {
using AccumulatorAccessType = typename OutputOp::FragmentAccumulator;
using ScaleBiasAccessType = typename OutputOp::FragmentScaleBias;
using FragmentSmemAccessType = typename OutputOp::FragmentOutput;
ScaleBiasAccessType const * scale_frag_ptr =
reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_scale);
ScaleBiasAccessType const * bias_frag_ptr =
reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_bias);
FragmentSmemAccessType * smem_frag_ptr =
reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) {
frag_iterator_accum.load(tb_frag_accum);
++frag_iterator_accum;
AccumulatorAccessType const * accumulator_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum);
const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount;
CUTLASS_PRAGMA_UNROLL
for (int it = 0; it < kOutputIterations; it++) {
smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it],
scale_frag_ptr[cid * kOutputIterations + it], bias_frag_ptr[cid * kOutputIterations + it]);
}
}
smem_iterator.store(tb_frag_smem);
++smem_iterator;
}
}
}
/// Streams the result to shared memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory
AccumulatorTile const &accumulator) { ///< Complete warp-level accumulator tile
/// Fragment Iterator to load slice of accumulator tile
AccumulatorFragmentIterator frag_iterator_accum(accumulator);
FragmentAccumulator tb_frag_accum;
/// Epilogue output fragment
typename SmemTileIterator::Fragment tb_frag_smem;
/// Iterate over the accumulator tile and store to shared memory
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) {
CUTLASS_PRAGMA_UNROLL
for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) {
using AccumulatorAccessType = typename OutputOp::FragmentAccumulator;
using FragmentSmemAccessType = typename OutputOp::FragmentOutput;
FragmentSmemAccessType * smem_frag_ptr =
reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem);
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) {
frag_iterator_accum.load(tb_frag_accum);
++frag_iterator_accum;
AccumulatorAccessType const * accumulator_frag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum);
const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount;
CUTLASS_PRAGMA_UNROLL
for (int it = 0; it < kOutputIterations; it++) {
smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it]);
}
}
smem_iterator.store(tb_frag_smem);
++smem_iterator;
}
}
}
};
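//
// Usage sketch (illustrative only, not part of the library): inside a fused kernel, once the
// warp-level accumulators of the first GEMM are complete, the epilogue stages them in shared
// memory. The iterator and functor types below are assumed to be defined by the caller.
//
//   using Epilogue = EpilogueSmemAccumulator<
//       SmemTileIterator, AccumulatorFragmentIterator, ScaleBiasIterator, OutputOp>;
//
//   Epilogue epilogue;
//   epilogue(output_op, smem_iterator, accumulators, scale_iterator, bias_iterator);
//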
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 9,073 | C | 38.281385 | 107 | 0.661854 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_base.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <type_traits>
#include <utility>
#endif
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
//
// This is used for metaprogramming epilogue functors. If they define
// `static bool const kIsHeavy = true;`, then the epilogue functor itself is
// not inlined. This results in smaller code and is advantageous if the epilogue
// functor consists of many instructions.
//
// If the epilogue functor does not define `kIsHeavy` or if it is `false`, then
// the behavior from CUTLASS 2.5 and before is retained. The epilogue is fully
// unrolled and inlined.
//
template<class>
struct TypeSink { typedef void type; };
template<class T> using TypeSinkT = typename TypeSink<T>::type;
template<class T, class=void> struct IsEpilogueFunctorHeavy {
static bool const value = false;
};
template<class T> struct IsEpilogueFunctorHeavy<T, TypeSinkT< decltype( T::kIsHeavy ) > > {
static bool const value = T::kIsHeavy;
};
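//
// Illustrative sketch (not part of the library) of how the trait above resolves; the functor
// names are hypothetical:
//
//   struct LightFunctor { /* no kIsHeavy member */ };
//   struct HeavyFunctor { static bool const kIsHeavy = true; };
//
//   static_assert(!IsEpilogueFunctorHeavy<LightFunctor>::value, "primary template defaults to false");
//   static_assert( IsEpilogueFunctorHeavy<HeavyFunctor>::value, "specialization reads kIsHeavy");
//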
////////////////////////////////////////////////////////////////////////////////
/// Base class for threadblock-scoped epilogues; defines the shared memory staging buffer and the warp-level tile iterator that writes accumulators to it
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpShape_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
int FragmentsPerIteration = 1
>
class EpilogueBase {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
static int const kPartitionsK = PartitionsK;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using Padding = Padding_;
/// Output layout is always row-major
using Layout = layout::RowMajor;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename AccumulatorTile::Element;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Use this to control the granularity of one epilogue 'iteration'
static int const kFragmentsPerIteration = FragmentsPerIteration;
public:
/// Shared storage allocation needed by the epilogue
struct SharedStorage {
//
// Type definitions
//
/// Element type of shared memory
using Element = typename WarpTileIterator::Element;
/// Tensor reference to shared memory allocation
using TensorRef = typename WarpTileIterator::TensorRef;
/// Layout of shared memory allocation
using Layout = typename WarpTileIterator::Layout;
/// Logical shape of the shared memory tile written to by all warps.
using Shape = MatrixShape<
WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK,
WarpCount::kN * WarpTileIterator::Shape::kColumn
>;
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
(Shape::kRow + Padding::kRow) * kFragmentsPerIteration,
Shape::kColumn + Padding::kColumn
>;
//
// Data members
//
AlignedBuffer<Element, StorageShape::kCount> storage;
//
// Methods
//
/// Returns a pointer to the shared memory buffer
CUTLASS_DEVICE
Element *data() {
return storage.data();
}
/// Returns a tensor reference to the shared memory buffer
CUTLASS_DEVICE
TensorRef reference() {
return TensorRef(
storage.data(),
Layout::packed({StorageShape::kRow, StorageShape::kColumn}));
}
};
protected:
//
// Data members
//
SharedStorage &shared_storage_;
/// Stores a warp's fragment of accumulators to SMEM
WarpTileIterator warp_tile_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueBase(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
shared_storage_(shared_storage),
warp_tile_iterator_(shared_storage.reference(), lane_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to three coordinates:
//
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
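    //
    // Worked example (illustrative only): with WarpCount = <2, 2, 2> and warp_idx = 5,
    //   warp_k = 5 / 4 = 1, warp_mn = 5 % 4 = 1, warp_m = 1 % 2 = 1, warp_n = 1 / 2 = 0,
    // so this warp stages its accumulators at SMEM tile offset {warp_k * kM + warp_m, warp_n} = {3, 0}.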
int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN);
int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
int warp_m = warp_mn % WarpCount::kM;
int warp_n = warp_mn / WarpCount::kM;
MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n};
warp_tile_iterator_.add_tile_offset(warp_offset);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 8,279 | C | 33.356846 | 128 | 0.654668 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/wmma_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief WMMA templates wrapping nvcuda::wmma fragments and mma_sync for SM70 Tensor Cores
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// The WMMA template structure defines nvcuda::wmma fragments and static asserts for the
// wmma native instruction shapes supported for half-precision multiplicands
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::half_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::half_t, ///< ElementB
LayoutB_, ///< LayoutB
ElementC_, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::half_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::half_t;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm70;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for f16 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// check supported wmma output data type for the given multiplicand data types
static_assert(
platform::is_same<cutlass::half_t, ElementC>::value || platform::is_same<float, ElementC>::value,
"Supported of wmma output data type for f16 multiplicands are: f16 and f32");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync for floating point multiplicands is avialable only for SM70 and beyond");
#endif
};
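//
// Usage sketch (illustrative only, not part of the library): every thread of a warp constructs the
// operator and calls it with warp-uniform fragments. Loading and storing the fragments
// (nvcuda::wmma::load_matrix_sync / store_matrix_sync) is handled by higher-level CUTLASS
// iterators and is omitted here.
//
//   using WmmaOp = cutlass::arch::Wmma<
//       cutlass::gemm::GemmShape<16, 16, 16>,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   WmmaOp wmma_op;
//   wmma_op(frag_D, frag_A, frag_B, frag_C);   // wraps nvcuda::wmma::mma_sync
//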
} // namespace arch
} // namespace cutlass
| 5,286 | C | 37.591241 | 113 | 0.615399 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm50.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#include "cutlass/arch/mma.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/functional.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, float, LayoutA, float, LayoutB, float, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = float;
CUTLASS_HOST_DEVICE
void operator()(
Array<float, 1> &d,
Array<float, 1> const &a,
Array<float, 1> const &b,
Array<float, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
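//
// Usage sketch (illustrative only): the 1x1x1 thread-level form reduces to a single fused
// multiply-add.
//
//   cutlass::arch::Mma<cutlass::gemm::GemmShape<1, 1, 1>, 1,
//                      float, cutlass::layout::RowMajor,
//                      float, cutlass::layout::ColumnMajor,
//                      float, cutlass::layout::RowMajor,
//                      cutlass::arch::OpMultiplyAdd> mma;
//
//   cutlass::Array<float, 1> d, a, b, c;
//   a[0] = 2.0f; b[0] = 3.0f; c[0] = 1.0f;
//   mma(d, a, b, c);   // d[0] == 7.0f
//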
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, double, LayoutA, double, LayoutB, double, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = double;
CUTLASS_HOST_DEVICE
void operator()(
Array<double, 1> &d,
Array<double, 1> const &a,
Array<double, 1> const &b,
Array<double, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, int, LayoutA, int, LayoutB, int, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = int;
CUTLASS_HOST_DEVICE
void operator()(
Array<int, 1> &d,
Array<int, 1> const &a,
Array<int, 1> const &b,
Array<int, 1> const &c
) {
d[0] = a[0] * b[0] + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<float>,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
}
};
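//
// Note: the four statements above accumulate the standard complex multiply-add d = a * b + c,
//   Re(d) = Re(a)Re(b) - Im(a)Im(b) + Re(c)
//   Im(d) = Im(a)Re(b) + Re(a)Im(b) + Im(c)
// split so that each line maps onto one scalar fused multiply-add.
//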
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<float>,
LayoutA,
float,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<complex<float>, 1> const &a,
Array<float, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0].real() * b[0] + c[0].real();
d[0].imag() = a[0].imag() * b[0] + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
float,
LayoutA,
complex<float>,
LayoutB,
complex<float>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<float>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<float>, 1> &d,
Array<float, 1> const &a,
Array<complex<float>, 1> const &b,
Array<complex<float>, 1> const &c
) {
d[0].real() = a[0] * b[0].real() + c[0].real();
    d[0].imag() = a[0] * b[0].imag() + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<double>,
LayoutA,
complex<double>,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<complex<double>, 1> const &a,
Array<complex<double>, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0].real() * b[0].real() + c[0].real();
d[0].imag() = a[0].imag() * b[0].real() + c[0].imag();
d[0].real() = -a[0].imag() * b[0].imag() + d[0].real();
d[0].imag() = a[0].real() * b[0].imag() + d[0].imag();
}
};
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
complex<double>,
LayoutA,
double,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<complex<double>, 1> const &a,
Array<double, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0].real() * b[0] + c[0].real();
d[0].imag() = a[0].imag() * b[0] + c[0].imag();
}
};
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<
gemm::GemmShape<1, 1, 1>,
1,
double,
LayoutA,
complex<double>,
LayoutB,
complex<double>,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAddComplex;
using ElementC = complex<double>;
CUTLASS_HOST_DEVICE
void operator()(
Array<complex<double>, 1> &d,
Array<double, 1> const &a,
Array<complex<double>, 1> const &b,
Array<complex<double>, 1> const &c
) {
d[0].real() = a[0] * b[0].real() + c[0].real();
    d[0].imag() = a[0] * b[0].imag() + c[0].imag();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, half_t, LayoutA, half_t, LayoutB, float, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = float;
CUTLASS_HOST_DEVICE
void operator()(
Array<float, 1> &d,
Array<half_t, 1> const &a,
Array<half_t, 1> const &b,
Array<float, 1> const &c
) {
d[0] = float(a[0]) * float(b[0]) + c[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation for Quaternions
template <
/// Layout of A matrix
typename LayoutA,
/// Layout of B matrix
typename LayoutB,
/// Layout of C matrix
typename LayoutC
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, Quaternion<float>, LayoutA, Quaternion<float>, LayoutB, Quaternion<float>, LayoutC, OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = OpMultiplyAdd;
using Element = Quaternion<float>;
using ElementC = Element;
CUTLASS_HOST_DEVICE
void operator()(
Array<Element, 1> &d,
Array<Element, 1> const &a,
Array<Element, 1> const &b,
Array<Element, 1> const &c
) {
multiply_add<Element, Element, Element> op;
d[0] = op(a[0], b[0], c[0]);
}
};
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,096 | C | 24.628175 | 140 | 0.567502 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm75.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply for SM75
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
// CUDA Toolkit includes for nvcuda::wmma needed for binarized matrix multiply.
#include <mma.h>
#include "cutlass/wmma_array.h"
#endif
// CUTLASS includes
#include "cutlass/arch/mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))
#define CUTLASS_ARCH_MMA_SM75_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750))
#define CUTLASS_ARCH_MMA_SM75_ENABLED
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - FP16 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<16, 8, 8>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 2>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0,%1}, {%2,%3}, {%4}, {%5,%6};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Matrix Multiply 1688 - FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<16, 8, 8>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16, 8, 8>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 2>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
:
"r"(A[0]), "r"(A[1]),
"r"(B[0]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3])
);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
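//
// Usage sketch (illustrative only, not part of the library): all 32 threads of a warp call the
// operator together, each holding its slice of the operands in the fragment layout produced by
// the CUTLASS warp-level iterators (omitted here).
//
//   using MmaOp = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<16, 8, 8>, 32,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   MmaOp mma;
//   MmaOp::FragmentA a;    // 4 x half_t per thread
//   MmaOp::FragmentB b;    // 2 x half_t per thread
//   MmaOp::FragmentC c, d; // 4 x float per thread
//   mma(d, a, b, c);       // issues mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32
//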
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply .8816 (8b)
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
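//
// Note (descriptive): each Array<int8_t, 4> operand and each pair of int accumulators occupy
// whole 32-bit registers, which is why the fragments are reinterpret_cast to unsigned / int
// before being bound to the "r" constraints of the inline PTX above. The same packing applies
// to the remaining 8-bit and 4-bit specializations below.
//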
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s8.u8 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8, 8, 16>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (8b) with SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,16>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.s8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,16>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.u8.s8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,16>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,16>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.s8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,16>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,16>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 4>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 4>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k16.row.col.satfinite.s32.u8.u8.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (4b)
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
int4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
uint4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
int4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
uint4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Integer matrix multiply (4b) - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
int4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.s4.s4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
uint4b_t,
layout::RowMajor,
int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("_mma.m8n8k32.row.col.u4.s4.sat {%0,%1}, %2, %3, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
int4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.s4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct Mma<
gemm::GemmShape<8,8,32>,
32,
uint4b_t,
layout::RowMajor,
uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate> {
using Shape = gemm::GemmShape<8,8,32>;
using ElementA = uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint4b_t, 8>;
using ElementB = uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint4b_t, 8>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
unsigned const & A = reinterpret_cast<unsigned const &>(a);
unsigned const & B = reinterpret_cast<unsigned const &>(b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
asm volatile("mma.sync.aligned.m8n8k32.row.col.satfinite.s32.u4.u4.s32 {%0,%1}, {%2}, {%3}, {%4,%5};\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A), "r"(B), "r"(C[0]), "r"(C[1]));
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// b1 ^ b1 + s32 => s32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma<
gemm::GemmShape<8,8,128>,
32,
uint1b_t,
layout::RowMajor,
uint1b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpXorPopc> {
using Shape = gemm::GemmShape<8,8,128>;
using ElementA = uint1b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint1b_t, 32>;
using ElementB = uint1b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint1b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 2>;
using Operator = OpXorPopc;
using ArchTag = arch::Sm75;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) const {
#if defined(CUTLASS_ARCH_MMA_SM75_ENABLED)
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
using WmmaFragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
nvcuda::wmma::experimental::precision::b1,
nvcuda::wmma::row_major>;
using WmmaFragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
nvcuda::wmma::experimental::precision::b1,
nvcuda::wmma::col_major>;
using WmmaFragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
int>;
WmmaFragmentA const & A = reinterpret_cast<WmmaFragmentA const &>(a);
WmmaFragmentB const & B = reinterpret_cast<WmmaFragmentB const &>(b);
WmmaFragmentC const & C = reinterpret_cast<WmmaFragmentC const &>(c);
WmmaFragmentC & D = reinterpret_cast<WmmaFragmentC &>(d);
nvcuda::wmma::bmma_sync(D, A, B, C, nvcuda::wmma::experimental::bmmaBitOpXOR,
nvcuda::wmma::experimental::bmmaAccumulateOpPOPC);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0); // WMMA must be supported to issue binary matrix multiply-accumulate instructions.
#endif // (__CUDA_ARCH__ >= 900) || defined(CUTLASS_ARCH_WMMA_ENABLED)
#endif
}
};
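// Scalar model of the binary inner product issued above (illustrative
// device helper, not part of this header): each accumulator element adds
// the population count of the XOR of the corresponding 128 A-bits and
// 128 B-bits.
//
//   __device__ int xor_popc_dot(uint32_t const *a_bits, uint32_t const *b_bits,
//                               int words, int c) {
//     int acc = c;
//     for (int i = 0; i < words; ++i) {
//       acc += __popc(a_bits[i] ^ b_bits[i]);  // CUDA integer popcount
//     }
//     return acc;  // words == 4 covers the K = 128 bits of this shape
//   }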
////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 31,652 | C | 23.31106 | 113 | 0.601352 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm60.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#include <cuda_fp16.h>
#include "cutlass/arch/mma.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutA, typename LayoutB, typename LayoutC>
struct Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 2> const &a,
Array<half_t, 1> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 B = __half2half2(reinterpret_cast<__half const &>(b));
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> &>(D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[i] * b[0] + c[i];
}
#endif
}
};
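// Worked example for the 2x1x1 specialization above (values illustrative):
// with a = {1, 2}, b = {3}, and c = {0.5, 0.25}, the single __hfma2 issued
// on SM60 and newer computes d = {1*3 + 0.5, 2*3 + 0.25} = {3.5, 6.25},
// matching the scalar fallback loop used below SM60.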
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutA, typename LayoutB>
struct Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 1> const &a,
Array<half_t, 2> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = __half2half2(reinterpret_cast<__half const &>(a));
__half2 B = reinterpret_cast<__half2 const &>(b);
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> &>(D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[0] * b[i] + c[i];
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma <
gemm::GemmShape<2, 2, 1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 4> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 4> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 Blo = __low2half2(reinterpret_cast<__half2 const &>(b));
__half2 Bhi = __high2half2(reinterpret_cast<__half2 const &>(b));
__half2 const *C = reinterpret_cast<__half2 const *>(&c);
__half2 Dlo = __hfma2(A, Blo, C[0]);
__half2 Dhi = __hfma2(A, Bhi, C[1]);
Array<half_t, 2> * D = reinterpret_cast<Array<half_t, 2> *>(&d);
D[0] = reinterpret_cast<Array<half_t, 2> const &>(Dlo);
D[1] = reinterpret_cast<Array<half_t, 2> const &>(Dhi);
#else
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < 2; ++j) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i + 2 * j] = a[i] * b[j] + c[i + 2 * j];
}
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma<
gemm::GemmShape<2, 2, 1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 4> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 4> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 Alo = __low2half2(reinterpret_cast<__half2 const &>(a));
__half2 Ahi = __high2half2(reinterpret_cast<__half2 const &>(a));
__half2 const & B = reinterpret_cast<__half2 const &>(b);
__half2 const *C = reinterpret_cast<__half2 const *>(&c);
__half2 Dlo = __hfma2(Alo, B, C[0]);
    __half2 Dhi = __hfma2(Ahi, B, C[1]);
Array<half_t, 2> * D = reinterpret_cast<Array<half_t, 2> *>(&d);
D[0] = reinterpret_cast<Array<half_t, 2> &>(Dlo);
D[1] = reinterpret_cast<Array<half_t, 2> &>(Dhi);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < 2; ++j) {
d[i * 2 + j] = a[i] * b[j] + c[i * 2 + j];
}
}
#endif
}
};
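// Usage sketch for the 2x2x1 specializations above (illustrative; the
// fragment values would normally come from a thread's tile of A, B, and C):
//
//   using HalfMma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<2, 2, 1>, 1,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   cutlass::Array<cutlass::half_t, 2> a, b;
//   cutlass::Array<cutlass::half_t, 4> c, d;
//   HalfMma()(d, a, b, c);  // d[i * 2 + j] = a[i] * b[j] + c[i * 2 + j]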
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
| 7,040 | C | 26.830039 | 100 | 0.553835 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/arch.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the operation implied by MMA.
struct OpMultiplyAdd;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the result is saturated to MAX_FLOAT|MIN_FLOAT or MAX_INT|MIN_INT
struct OpMultiplyAddSaturate;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to a narrower type (BF16)
struct OpMultiplyAddFastBF16;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to a narrower type (F16)
struct OpMultiplyAddFastF16;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to 2 (big and small) TF32 components
// Perform 3xTF32 or 4xTF32 for every F32 output element
struct OpMultiplyAddFastF32;
/// Tag indicating the input is converted to 2 (big and small) TF32 components
// Perform 3xTF32 or 4xTF32 for every complex<F32> output element
struct OpMultiplyAddComplexFastF32;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the complex multiply-add operation
struct OpMultiplyAddComplex;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the gaussian complex multiply-add operation
struct OpMultiplyAddGaussianComplex;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the inner product is defined by (XOR, POPC)
struct OpXorPopc;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying math operators as thread-level operations.
struct OpClassSimt;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as Tensor Core operations.
struct OpClassTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifing operators as WMMA Tensor Core operations
struct OpClassWmmaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator
>
struct Mma;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - specialized for 1x1x1x1 matrix multiply operation
template <
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator_
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, ElementA, LayoutA, ElementB, LayoutB, ElementC_, LayoutC, Operator_> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = Operator_;
using ElementC = ElementC_;
CUTLASS_HOST_DEVICE
void operator()(
Array<ElementC, 1> &d,
Array<ElementA, 1> const &a,
Array<ElementB, 1> const &b,
Array<ElementC, 1> const &c
) {
multiply_add<ElementA, ElementB, ElementC> op;
d[0] = op(a[0], b[0], c[0]);
}
};
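// Usage sketch for the generic 1x1x1 specialization above (illustrative;
// element types and layouts are free parameters):
//
//   using ScalarMma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<1, 1, 1>, 1,
//       float, cutlass::layout::RowMajor,
//       float, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   cutlass::Array<float, 1> d, a, b, c;
//   a[0] = 2.0f; b[0] = 3.0f; c[0] = 1.0f;
//   ScalarMma()(d, a, b, c);  // d[0] == 7.0f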
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specifies internal data type for computation
struct SPFormatType {
enum Kind {
Thread
};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator,
/// Specifies meta data format
SPFormatType::Kind SPFormat = SPFormatType::Thread
>
struct SparseMma;
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations for each compute capability
//
#include "cutlass/arch/mma_sm50.h"
#include "cutlass/arch/mma_sm60.h"
#include "cutlass/arch/mma_sm61.h"
#include "cutlass/arch/mma_sm70.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sparse_sm80.h"
#include "cutlass/arch/mma_sm90.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,037 | C | 34.100437 | 110 | 0.549956 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sparse_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Sparse matrix multiply accumulate for SM80
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))
#define CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
#define CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED
#endif
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16832
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct SparseMma<
gemm::GemmShape<16, 8, 32>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread
> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 8>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
uint32_t const *C = reinterpret_cast<uint32_t const *>(&c);
uint32_t *D = reinterpret_cast<uint32_t *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, "
"{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x0;\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E));
}
else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, "
"{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x1;\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
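// Sparsity bookkeeping for the 16x8x32 specialization above (informal
// summary; the normative metadata layout is defined by the PTX ISA):
// operand A is stored 2:4 compressed, so each thread holds
// Shape::kM * Shape::kK / (32 * kSparse) = 8 halves in FragmentA rather
// than 16, while B and C stay dense. E carries kMetaSizeInBits = 2 bits
// of position information per stored A element, and id2 (0 or 1, bounded
// by kMaxID2) selects which half of the metadata the instruction consumes.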
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct SparseMma<
gemm::GemmShape<16, 8, 32>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread
> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]),
"r"(E));
}
else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]),
"r"(E));
}
else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16832 - Float BF16, FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = bf16 * bf16 + F32
template <>
struct SparseMma<gemm::GemmShape<16, 8, 32>, 32, bfloat16_t, layout::RowMajor,
bfloat16_t, layout::ColumnMajor, float, layout::RowMajor,
OpMultiplyAdd, SPFormatType::Thread> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = bfloat16_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<bfloat16_t, 8>;
using ElementB = bfloat16_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<bfloat16_t, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16816 - Float TF32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = tf32 * tf32 + F32
template <>
struct SparseMma<gemm::GemmShape<16, 8, 16>, 32, tfloat32_t, layout::RowMajor,
tfloat32_t, layout::ColumnMajor, float, layout::RowMajor,
OpMultiplyAdd, SPFormatType::Thread> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = tfloat32_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<tfloat32_t, 4>;
using ElementB = tfloat32_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<tfloat32_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 4;
static int const kMaxID2 = 2;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else {
assert(0);
}
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
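// Fragment sizing for the TF32 specialization above follows the same
// arithmetic as the F16 case with a smaller K: 16 * 16 / (32 * kSparse) = 4
// tfloat32_t values per thread in FragmentA and 8 * 16 / 32 = 4 in
// FragmentB. The per-element metadata width doubles relative to the
// 16-bit cases: kMetaSizeInBits = 4 rather than 2.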
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16864 - S8 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16864 - S8 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 168128 - S4 input, S32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 168128 - S4 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
if (id2 == 0)
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
else
assert(0);
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 43,978 | C | 25.084816 | 102 | 0.560576 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm90.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 8))
#define CUTLASS_ARCH_MMA_SM90_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
#define CUTLASS_ARCH_MMA_SM90_ENABLED
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
/// Matrix Multiply-Add 16x8x4 fp64
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F64 = F64 * F64 + F64
template <>
struct Mma<
gemm::GemmShape<16,8,4>,
32,
double,
layout::RowMajor,
double,
layout::ColumnMajor,
double,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<16,8,4>;
using ElementA = double;
using LayoutA = layout::RowMajor;
using FragmentA = Array<double, 2>;
using ElementB = double;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<double, 1>;
using ElementC = double;
using LayoutC = layout::RowMajor;
using FragmentC = Array<double, 4>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm90;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c) const {
#if defined(CUTLASS_ARCH_MMA_SM90_ENABLED)
double const *A = reinterpret_cast<double const *>(&a);
double const *B = reinterpret_cast<double const *>(&b);
double const *C = reinterpret_cast<double const *>(&c);
double *D = reinterpret_cast<double *>(&d);
asm volatile("mma.sync.aligned.m16n8k4.row.col.f64.f64.f64.f64 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n"
: "=d"(D[0]), "=d"(D[1]), "=d"(D[2]), "=d"(D[3])
: "d"(A[0]), "d"(A[1]),
"d"(B[0]),
"d"(C[0]), "d"(C[1]), "d"(C[2]), "d"(C[3]));
#else
CUTLASS_UNUSED(d);
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
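// Example usage (illustrative sketch; the per-thread fragment contents are
// assumed to already follow the m16n8k4 thread-data mapping for SM90):
//
//   using MmaF64 = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<16, 8, 4>, 32,
//       double, cutlass::layout::RowMajor,
//       double, cutlass::layout::ColumnMajor,
//       double, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   MmaF64 mma;
//   MmaF64::FragmentA frag_A;   // 2 x double per thread
//   MmaF64::FragmentB frag_B;   // 1 x double per thread
//   MmaF64::FragmentC accum;    // 4 x double per thread
//   accum.clear();
//   mma(accum, frag_A, frag_B, accum);   // D = A * B + C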
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4,430 | C | 32.568182 | 120 | 0.575621 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))
#define CUTLASS_ARCH_MMA_SM70_SUPPORTED
#endif
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700))
#if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 &&__CUDACC_VER_MINOR__ >= 1))
#define CUTLASS_ARCH_MMA_SM70_ENABLED
#endif
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Matrix multiply accumulate 884 - FP16 accumulation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<8,8,4>,
8,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::ColumnMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.col.col.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::ColumnMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::RowMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.col.row.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.col.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::RowMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
unsigned const *C = reinterpret_cast<unsigned const *>(&c);
unsigned *D = reinterpret_cast<unsigned *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.row.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Matrix multiply accumulate 884 - FP32 accumulation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::ColumnMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::ColumnMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
/// Multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.col.col.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, "
"{%12,%13,%14,%15,%16,%17,%18,%19};\n"
: "=f"(D[0]),
"=f"(D[1]),
"=f"(D[2]),
"=f"(D[3]),
"=f"(D[4]),
"=f"(D[5]),
"=f"(D[6]),
"=f"(D[7])
: "r"(A[0]),
"r"(A[1]),
"r"(B[0]),
"r"(B[1]),
"f"(C[0]),
"f"(C[1]),
"f"(C[2]),
"f"(C[3]),
"f"(C[4]),
"f"(C[5]),
"f"(C[6]),
"f"(C[7])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::ColumnMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::RowMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
/// Multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.col.row.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, "
"{%12,%13,%14,%15,%16,%17,%18,%19};\n"
: "=f"(D[0]),
"=f"(D[1]),
"=f"(D[2]),
"=f"(D[3]),
"=f"(D[4]),
"=f"(D[5]),
"=f"(D[6]),
"=f"(D[7])
: "r"(A[0]),
"r"(A[1]),
"r"(B[0]),
"r"(B[1]),
"f"(C[0]),
"f"(C[1]),
"f"(C[2]),
"f"(C[3]),
"f"(C[4]),
"f"(C[5]),
"f"(C[6]),
"f"(C[7])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
/// Multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.col.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, "
"{%12,%13,%14,%15,%16,%17,%18,%19};\n"
: "=f"(D[0]),
"=f"(D[1]),
"=f"(D[2]),
"=f"(D[3]),
"=f"(D[4]),
"=f"(D[5]),
"=f"(D[6]),
"=f"(D[7])
: "r"(A[0]),
"r"(A[1]),
"r"(B[0]),
"r"(B[1]),
"f"(C[0]),
"f"(C[1]),
"f"(C[2]),
"f"(C[3]),
"f"(C[4]),
"f"(C[5]),
"f"(C[6]),
"f"(C[7])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
float,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<8, 8, 4>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 4>;
using ElementB = half_t;
using LayoutB = layout::RowMajor;
using FragmentB = Array<half_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 8>;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm70;
/// Multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c
) {
#if defined(CUTLASS_ARCH_MMA_SM70_ENABLED)
unsigned const *A = reinterpret_cast<unsigned const *>(&a);
unsigned const *B = reinterpret_cast<unsigned const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
asm volatile("mma.sync.aligned.m8n8k4.row.row.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, "
"{%12,%13,%14,%15,%16,%17,%18,%19};\n"
: "=f"(D[0]),
"=f"(D[1]),
"=f"(D[2]),
"=f"(D[3]),
"=f"(D[4]),
"=f"(D[5]),
"=f"(D[6]),
"=f"(D[7])
: "r"(A[0]),
"r"(A[1]),
"r"(B[0]),
"r"(B[1]),
"f"(C[0]),
"f"(C[1]),
"f"(C[2]),
"f"(C[3]),
"f"(C[4]),
"f"(C[5]),
"f"(C[6]),
"f"(C[7])
);
#else
assert(0);
#if defined(__CUDA_ARCH__)
asm volatile ("brkpt;\n" ::);
#endif
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation specialized for the entire warp
template <
typename LayoutA,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename Operator
>
struct Mma<
gemm::GemmShape<16, 16, 4>,
32,
half_t,
LayoutA,
half_t,
LayoutB,
ElementC,
LayoutC,
Operator
> :
public Mma<
gemm::GemmShape<8, 8, 4>,
8,
half_t,
LayoutA,
half_t,
LayoutB,
ElementC,
LayoutC,
Operator> {
using Shape = gemm::GemmShape<16, 16, 4>;
};
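// Example usage (illustrative sketch; fragments are assumed to follow the
// quad-pair data distribution required by mma.sync.m8n8k4 on SM70):
//
//   using MmaF16F32 = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<8, 8, 4>, 8,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   MmaF16F32 mma;
//   MmaF16F32::FragmentA frag_A;   // 4 x half_t
//   MmaF16F32::FragmentB frag_B;   // 4 x half_t
//   MmaF16F32::FragmentC accum;    // 8 x float
//   accum.clear();
//   mma(accum, frag_A, frag_B, accum);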
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 16,554 | C | 23.857357 | 118 | 0.552072 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/simd.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing SIMD operators
*/
#pragma once
#include "../array.h"
#include "../numeric_types.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Element-wise operators
//
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(Array<T, N> const &a, Array<T, N> const &b) {
Array<T, N> d;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
d[i] = a[i] * b[i];
}
return d;
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator+(Array<T, N> const &a, Array<T, N> const &b) {
Array<T, N> d;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
d[i] = a[i] + b[i];
}
return d;
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator-(Array<T, N> const &a, Array<T, N> const &b) {
Array<T, N> d;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
d[i] = a[i] - b[i];
}
return d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Multiply-accumulate operators
//
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> mac(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) {
Array<T, N> d;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
d[i] = a[i] * b[i] + c[i];
}
return d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Dot product operator
//
template <typename Element, typename Accumulator, int N>
CUTLASS_HOST_DEVICE
Accumulator dot(Array<Element, N> const &a, Array<Element, N> const &b, Accumulator accum) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
accum += a[i] * b[i];
}
return accum;
}
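// Example usage (illustrative sketch of the element-wise and reduction helpers
// defined above):
//
//   cutlass::Array<float, 4> x, y, z;
//   x.fill(1.0f);  y.fill(2.0f);  z.fill(0.5f);
//   cutlass::Array<float, 4> w = cutlass::arch::mac(x, y, z);   // w[i] = x[i] * y[i] + z[i]
//   float sum = cutlass::arch::dot(x, y, 0.0f);                 // accumulates x[i] * y[i]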
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "simd_sm60.h"
#include "simd_sm61.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| 3,998 | C | 30.738095 | 100 | 0.550025 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/memory_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory added for SM80
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/cache_operation.h"
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
#define CUDA_CP_ASYNC_ACTIVATED 1
#else
#define CUDA_CP_ASYNC_ACTIVATED 0
#endif
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Initiates an asynchronous copy from global memory to shared memory.
///
/// LDGSTS
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async;
/// Initiates an asynchronous copy from global memory to shared memory. Rather than predicating
/// the entire transfer, zeros are written to SMEM if the guard predicate is false.
///
/// LDGSTS
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async_zfill;
/// Initiates an asynchronous copy from global memory to shared memory. Rather than predicating
/// the entire transfer, NaNs (0x7eff) are written to SMEM if the guard predicate is false.
///
/// LDGSTS
///
template <
/// Size of the access in bytes
int SizeInBytes,
/// Cache operation
CacheOperation::Kind cache_op = CacheOperation::Always>
struct cp_async_nan;
/// Either 0 or 1 is written to SMEM based on the input element type
/// Used for diagonal elements of triangular matrix of BLAS3 functions
///
/// STS
///
template <
/// Type of Element
typename Element,
/// If the data is for a Hermitian matrix diagonal
bool IsHermitianData = false>
struct cp_async_diag;
static const uint32_t OOB_NAN_F16 = 0x7eff;
static const uint32_t OOB_NAN_F16x2 = ((OOB_NAN_F16 << 16) | OOB_NAN_F16);
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async<SizeInBytes, CacheOperation::Always> {
/// Copy
CUTLASS_DEVICE
cp_async(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
// Make sure the size is supported.
static_assert((SizeInBytes == 4 || SizeInBytes == 8 || SizeInBytes == 16),
"Size is not supported");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.ca.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.ca.shared.global [%1], [%2], %3;\n"
#endif
"}\n" ::"r"((int)pred_guard),
"r"(smem_int_ptr), "l"(global_ptr), "n"(SizeInBytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
#endif
}
};
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async_zfill<SizeInBytes, CacheOperation::Always> {
/// Copy with zero fill
CUTLASS_DEVICE
cp_async_zfill(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
// Make sure the size is supported.
static_assert((SizeInBytes == 4 || SizeInBytes == 8 || SizeInBytes == 16),
"Size is not supported");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
#if CUTLASS_ENABLE_L2_PREFETCH
"cp.async.ca.shared.global.L2::128B [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#else
"cp.async.ca.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#endif
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
else {
AccessType zeros;
zeros.clear();
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
};
/// Partial specialization
template <>
struct cp_async_nan<16, CacheOperation::Always> {
static int const kSizeInBytes = 16;
/// Copy with nan fill
CUTLASS_DEVICE
cp_async_nan(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
static __constant__ uint4 OOB_NAN_F16x8 = {OOB_NAN_F16x2, OOB_NAN_F16x2,
OOB_NAN_F16x2, OOB_NAN_F16x2};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.ca.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.ca.shared.global [%1], [%2], %3;\n"
#endif
" @!p st.shared.v4.u32 [%1], {%4, %5, %6, %7};\n"
"}\n"
:
: "r"((int)pred_guard), "r"(smem_int_ptr), "l"(global_ptr),
"n"(kSizeInBytes), "r"(OOB_NAN_F16x8.x), "r"(OOB_NAN_F16x8.y), "r"(OOB_NAN_F16x8.z),
"r"(OOB_NAN_F16x8.w));
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_UNUSED(global_ptr);
CUTLASS_UNUSED(pred_guard);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Partial specialization to write one (1)
template<typename Element_>
struct cp_async_diag <Element_, false> {
using Element = Element_;
CUTLASS_DEVICE
cp_async_diag(void *smem_ptr) {
#if CUDA_CP_ASYNC_ACTIVATED
/// Values for the diagonal elements of the triangular input matrix
static __constant__ uint2 DIAG_DATA_DOUBLE_ONE = {0x3ff00000, 0x00000000};
static __constant__ uint1 DIAG_DATA_FLOAT_ONE = {0x3f800000};
static __constant__ uint1 DIAG_DATA_ZERO = {0x00000000};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
if (platform::is_same<Element, complex<double>>::value) {
asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_DOUBLE_ONE.y), "r"(DIAG_DATA_DOUBLE_ONE.x),
"r"(DIAG_DATA_ZERO.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, complex<float>>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_FLOAT_ONE.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, double>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_DOUBLE_ONE.y),"r"(DIAG_DATA_DOUBLE_ONE.x));
} else if (platform::is_same<Element, float>::value) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_FLOAT_ONE.x));
} else {
CUTLASS_UNUSED(smem_int_ptr);
CUTLASS_NOT_IMPLEMENTED();
}
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
/// Partial specialization to write zero for the imaginary part of Hermitian data
template<typename Element_>
struct cp_async_diag <Element_, true> {
using Element = Element_;
CUTLASS_DEVICE
cp_async_diag(void *smem_ptr) {
#if CUDA_CP_ASYNC_ACTIVATED
/// Values for the diagonal elements of the triangular input matrix
static __constant__ uint1 DIAG_DATA_ZERO = {0x00000000};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
if (platform::is_same<Element, complex<double>>::value) {
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_ZERO.x), "r"(DIAG_DATA_ZERO.x));
} else if (platform::is_same<Element, complex<float>>::value) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(smem_int_ptr), "r"(DIAG_DATA_ZERO.x));
} else {
CUTLASS_UNUSED(smem_int_ptr);
CUTLASS_NOT_IMPLEMENTED();
}
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async<SizeInBytes, CacheOperation::Global> {
/// Copy
CUTLASS_DEVICE
cp_async(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
static_assert(SizeInBytes == 16,
"cp.async only supports CacheOperation::Global when access size is 16B.");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.cg.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.cg.shared.global [%1], [%2], %3;\n"
#endif
"}\n" ::"r"((int)pred_guard),
"r"(smem_int_ptr), "l"(global_ptr), "n"(SizeInBytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
#endif
}
};
/// Partial specialization
template <
/// Size of the access in bytes
int SizeInBytes>
struct cp_async_zfill<SizeInBytes, CacheOperation::Global> {
/// Copy with zero fill
CUTLASS_DEVICE
cp_async_zfill(void *smem_ptr, void const *global_ptr, bool pred_guard = true) {
#if CUDA_CP_ASYNC_ACTIVATED
static_assert(SizeInBytes == 16,
"cp.async only supports CacheOperation::Global when access size is 16B.");
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
int src_in_bytes = (pred_guard ? SizeInBytes : 0);
asm volatile(
#if CUTLASS_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#else
"cp.async.cg.shared.global [%0], [%1], %2, %3;\n" ::"r"(smem_int_ptr),
#endif
"l"(global_ptr), "n"(SizeInBytes), "r"(src_in_bytes));
#else
using AccessType = Array<uint8_t, SizeInBytes>;
if (pred_guard) {
*static_cast<AccessType *>(smem_ptr) = *static_cast<AccessType const *>(global_ptr);
}
else {
AccessType zeros;
zeros.clear();
*static_cast<AccessType *>(smem_ptr) = zeros;
}
#endif
}
};
/// Partial specialization
template <>
struct cp_async_nan<16, CacheOperation::Global> {
static int const kSizeInBytes = 16;
/// Copy with nan fill
CUTLASS_DEVICE
cp_async_nan(void *smem_ptr, void const *global_ptr, bool pred_guard) {
#if CUDA_CP_ASYNC_ACTIVATED
static __constant__ uint4 OOB_NAN_F16x8 = {OOB_NAN_F16x2, OOB_NAN_F16x2,
OOB_NAN_F16x2, OOB_NAN_F16x2};
unsigned smem_int_ptr = cutlass_get_smem_pointer(smem_ptr);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %0, 0;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p cp.async.cg.shared.global.L2::128B [%1], [%2], %3;\n"
#else
" @p cp.async.cg.shared.global [%1], [%2], %3;\n"
#endif
" @!p st.shared.v4.u32 [%1], {%4, %5, %6, %7};\n"
"}\n"
:
: "r"((int)pred_guard), "r"(smem_int_ptr), "l"(global_ptr),
"n"(kSizeInBytes), "r"(OOB_NAN_F16x8.x), "r"(OOB_NAN_F16x8.y), "r"(OOB_NAN_F16x8.z),
"r"(OOB_NAN_F16x8.w));
#else
CUTLASS_UNUSED(smem_ptr);
CUTLASS_UNUSED(global_ptr);
CUTLASS_UNUSED(pred_guard);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Establishes an ordering w.r.t previously issued cp.async instructions. Does not block.
CUTLASS_DEVICE
void cp_async_fence() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.commit_group;\n" ::);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Blocks until all but <N> previous cp.async.commit_group operations have committed.
template <int N>
CUTLASS_DEVICE void cp_async_wait() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.wait_group %0;\n" ::"n"(N));
#endif
}
/// Blocks until all previous cp.async.commit_group operations have committed.
template <>
CUTLASS_DEVICE void cp_async_wait<0>() {
#if CUDA_CP_ASYNC_ACTIVATED
asm volatile("cp.async.wait_all;\n" ::);
#endif
}
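// Example usage (illustrative sketch of a minimal cp.async staging pattern;
// gmem_ptr and guard are placeholders supplied by the caller):
//
//   __shared__ __align__(16) uint8_t smem_buffer[128];
//   void const *gmem_ptr = /* 16B-aligned global address */ nullptr;
//   bool guard = true;                      // false skips (or zero-fills) the copy
//
//   cutlass::arch::cp_async<16>(smem_buffer, gmem_ptr, guard);
//   cutlass::arch::cp_async_fence();        // commit the outstanding copies as a group
//   cutlass::arch::cp_async_wait<0>();      // wait until all committed groups complete
//   __syncthreads();                        // make the staged data visible to the CTA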
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,154 | C | 31.45182 | 100 | 0.57602 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/simd_sm60.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing SIMD operators for SM60
*/
#pragma once
#include "simd.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Element-wise operators - specialized for half_t x 2
//
template <>
CUTLASS_HOST_DEVICE
Array<half_t, 2> operator*(Array<half_t, 2> const &a, Array<half_t, 2> const &b) {
  Array<half_t, 2> d;
  // Scalar fallback: element-wise product (packed HFMA2 intrinsics could be used on SM60+)
  d[0] = a[0] * b[0];
  d[1] = a[1] * b[1];
  return d;
}
template <>
CUTLASS_HOST_DEVICE
Array<half_t, 2> operator+(Array<half_t, 2> const &a, Array<half_t, 2> const &b) {
  Array<half_t, 2> d;
  // Scalar fallback: element-wise sum
  d[0] = a[0] + b[0];
  d[1] = a[1] + b[1];
  return d;
}
template <>
CUTLASS_HOST_DEVICE
Array<half_t, 2> operator-(Array<half_t, 2> const &a, Array<half_t, 2> const &b) {
  Array<half_t, 2> d;
  // Scalar fallback: element-wise difference
  d[0] = a[0] - b[0];
  d[1] = a[1] - b[1];
  return d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Multiply-accumulate operators - specialized for half_t x 2
template <>
CUTLASS_HOST_DEVICE
Array<half_t, 2> mac(Array<half_t, 2> const &a, Array<half_t, 2> const &b, Array<half_t, 2> const &c) {
  Array<half_t, 2> d;
  // Scalar fallback: element-wise multiply-accumulate
  d[0] = a[0] * b[0] + c[0];
  d[1] = a[1] * b[1] + c[1];
  return d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Dot product operator - specialized for half_t <- (half_t * half_t) x 2 + half_t
template <>
CUTLASS_HOST_DEVICE
half_t dot(Array<half_t, 2> const &a, Array<half_t, 2> const &b, half_t accum) {
  // Scalar fallback: accumulate both products into the half-precision accumulator
  accum = accum + a[0] * b[0];
  accum = accum + a[1] * b[1];
  return accum;
}
/// Dot product operator - specialized for float <- (half_t * half_t) x 2 + float
template <>
CUTLASS_HOST_DEVICE
float dot(Array<half_t, 2> const &a, Array<half_t, 2> const &b, float accum) {
  // Scalar fallback: promote to float before accumulating
  accum += float(a[0]) * float(b[0]);
  accum += float(a[1]) * float(b[1]);
  return accum;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 3,656 | C | 30.25641 | 103 | 0.597101 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/memory.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory
*/
#pragma once
#include "cutlass/cutlass.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Fragment type to store loaded data
typename AccessType,
/// The bytes of loading
int LoadBytes
>
struct global_load;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
#if (((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) || \
(__CUDACC_VER_MAJOR__ > 11)) && \
defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) && \
! (defined(__clang__) && defined(__CUDA__))
#define CUTLASS_ENABLE_L2_PREFETCH 1
#else
#define CUTLASS_ENABLE_L2_PREFETCH 0
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
// The redundant mov PTX instructions are used to force the compiler to keep the
// initializing code before ld.global
template <typename AccessType>
struct global_load<AccessType,
32
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 *data = reinterpret_cast<uint4 *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %9, 0;\n"
" mov.b32 %0, %10;\n"
" mov.b32 %1, %11;\n"
" mov.b32 %2, %12;\n"
" mov.b32 %3, %13;\n"
" mov.b32 %4, %14;\n"
" mov.b32 %5, %15;\n"
" mov.b32 %6, %16;\n"
" mov.b32 %7, %17;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.L2::128B.v4.u32 {%4, %5, %6, %7}, [%18];\n"
#else
" @p ld.global.v4.u32 {%0, %1, %2, %3}, [%8];\n"
" @p ld.global.v4.u32 {%4, %5, %6, %7}, [%18];\n"
#endif
"}\n"
: "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w),
"=r"(data[1].x), "=r"(data[1].y), "=r"(data[1].z), "=r"(data[1].w)
: "l"(ptr), "r"((int)pred_guard), "r"(data[0].x), "r"(data[0].y),
"r"(data[0].z), "r"(data[0].w), "r"(data[1].x), "r"(data[1].y),
"r"(data[1].z), "r"(data[1].w), "l"(((uint8_t *)ptr) + 16));
}
};
template <typename AccessType>
struct global_load<AccessType,
16
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint4 &data = reinterpret_cast<uint4 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" mov.b32 %0, %6;\n"
" mov.b32 %1, %7;\n"
" mov.b32 %2, %8;\n"
" mov.b32 %3, %9;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v4.u32 {%0, %1, %2, %3}, [%4];\n"
#else
" @p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
#endif
"}\n"
: "=r"(data.x), "=r"(data.y), "=r"(data.z), "=r"(data.w)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w));
}
};
template <typename AccessType>
struct global_load<AccessType,
8
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint2 &data = reinterpret_cast<uint2 &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" mov.b32 %0, %4;\n"
" mov.b32 %1, %5;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.v2.u32 {%0, %1}, [%2];\n"
#else
" @p ld.global.v2.u32 {%0, %1}, [%2];\n"
#endif
"}\n"
: "=r"(data.x), "=r"(data.y)
: "l"(ptr), "r"((int)pred_guard), "r"(data.x), "r"(data.y));
}
};
template <typename AccessType>
struct global_load<AccessType,
4
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
unsigned &data = reinterpret_cast<unsigned &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b32 %0, %3;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.u32 %0, [%1];\n"
#else
" @p ld.global.u32 %0, [%1];\n"
#endif
"}\n"
: "=r"(data)
: "l"(ptr), "r"((int)pred_guard), "r"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
2
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
uint16_t &data = reinterpret_cast<uint16_t &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" mov.b16 %0, %3;\n"
#if CUTLASS_ENABLE_L2_PREFETCH
" @p ld.global.L2::128B.u16 %0, [%1];\n"
#else
" @p ld.global.u16 %0, [%1];\n"
#endif
"}\n"
: "=h"(data)
: "l"(ptr), "r"((int)pred_guard), "h"(data));
}
};
template <typename AccessType>
struct global_load<AccessType,
1
> {
CUTLASS_DEVICE
global_load(AccessType &D, void const *ptr, bool pred_guard) {
if (pred_guard) D = *(reinterpret_cast<AccessType const *>(ptr));
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Fragment type to store data
typename AccessType,
/// The bytes of storing
int StoreBytes
>
struct global_store;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct global_store<AccessType, 64> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const *data = reinterpret_cast<uint4 const *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
" @p st.global.v4.u32 [%6], {%7, %8, %9, %10};\n"
" @p st.global.v4.u32 [%11], {%12, %13, %14, %15};\n"
" @p st.global.v4.u32 [%16], {%17, %18, %19, %20};\n"
"}\n"
:
: "l"(ptr), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z),
"r"(data[0].w), "r"((int)pred_guard), "l"(((uint8_t *)ptr) + 16),
"r"(data[1].x), "r"(data[1].y), "r"(data[1].z), "r"(data[1].w),
"l"(((uint8_t *)ptr) + 32),
"r"(data[2].x), "r"(data[2].y), "r"(data[2].z), "r"(data[2].w),
"l"(((uint8_t *)ptr) + 48),
"r"(data[3].x), "r"(data[3].y), "r"(data[3].z), "r"(data[3].w));
}
};
template <typename AccessType>
struct global_store<AccessType, 32> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const *data = reinterpret_cast<uint4 const *>(&D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
" @p st.global.v4.u32 [%6], {%7, %8, %9, %10};\n"
"}\n"
:
: "l"(ptr), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z),
"r"(data[0].w), "r"((int)pred_guard), "l"(((uint8_t *)ptr) + 16),
"r"(data[1].x), "r"(data[1].y), "r"(data[1].z), "r"(data[1].w));
}
};
template <typename AccessType>
struct global_store<AccessType, 16> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint4 const &data = reinterpret_cast<uint4 const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %5, 0;\n"
" @p st.global.v4.u32 [%0], {%1, %2, %3, %4};\n"
"}\n"
:
: "l"(ptr), "r"(data.x), "r"(data.y), "r"(data.z), "r"(data.w), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 8> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint2 const &data = reinterpret_cast<uint2 const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %3, 0;\n"
" @p st.global.v2.u32 [%0], {%1, %2};\n"
"}\n"
:
: "l"(ptr), "r"(data.x), "r"(data.y), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 4> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint32_t const &data = reinterpret_cast<uint32_t const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.u32 [%0], %1;\n"
"}\n"
:
: "l"(ptr), "r"(data), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 2> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
uint16_t const &data = reinterpret_cast<uint16_t const &>(D);
asm volatile(
"{\n"
" .reg .pred p;\n"
" setp.ne.b32 p, %2, 0;\n"
" @p st.global.u16 [%0], %1;\n"
"}\n"
:
: "l"(ptr), "h"(data), "r"((int)pred_guard));
}
};
template <typename AccessType>
struct global_store<AccessType, 1> {
CUTLASS_DEVICE
global_store(AccessType const &D, void *ptr, bool pred_guard) {
if (pred_guard) *(reinterpret_cast<AccessType *>(ptr)) = D;
}
};
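// Example usage (illustrative sketch; gmem_in, gmem_out, and guard are
// placeholders for a 16B-aligned source, destination, and access predicate):
//
//   using Fragment = cutlass::Array<float, 4>;   // 16 bytes
//   Fragment frag;
//   cutlass::arch::global_load<Fragment, sizeof(Fragment)>(frag, gmem_in, guard);
//   // ... operate on frag ...
//   cutlass::arch::global_store<Fragment, sizeof(Fragment)>(frag, gmem_out, guard);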
/////////////////////////////////////////////////////////////////////////////////////////////////
/// ld.shared
template <int Bytes>
CUTLASS_DEVICE
void shared_load(void *dst, uint32_t ptr);
/// ld.shared - 16b
template <>
CUTLASS_DEVICE
void shared_load<2>(void *dst, uint32_t ptr) {
asm volatile("ld.shared.u16 %0, [%1];\n"
: "=h"(*reinterpret_cast<uint16_t *>(dst))
: "r"(ptr));
}
/// ld.shared - 32b
template <>
CUTLASS_DEVICE
void shared_load<4>(void *dst, uint32_t ptr) {
asm volatile("ld.shared.u32 %0, [%1];\n"
: "=r"(*reinterpret_cast<uint32_t *>(dst))
: "r"(ptr));
}
/// ld.shared - 64b
template <>
CUTLASS_DEVICE
void shared_load<8>(void *dst, uint32_t ptr) {
uint2 *dst_u64 = reinterpret_cast<uint2 *>(dst);
asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
:
"=r"(dst_u64->x),
"=r"(dst_u64->y)
: "r"(ptr));
}
/// ld.shared - 128b
template <>
CUTLASS_DEVICE
void shared_load<16>(void *dst, uint32_t ptr) {
uint4 *dst_u128 = reinterpret_cast<uint4 *>(dst);
asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
:
"=r"(dst_u128->x),
"=r"(dst_u128->y),
"=r"(dst_u128->z),
"=r"(dst_u128->w)
: "r"(ptr));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// st.shared
template <int Bytes>
CUTLASS_DEVICE
void shared_store(uint32_t ptr, void const *src);
/// st.shared - 16b
template <>
CUTLASS_DEVICE
void shared_store<2>(uint32_t ptr, void const *src) {
asm volatile("st.shared.u16 [%0], %1;\n"
: :
"r"(ptr),
"h"(*reinterpret_cast<uint16_t const *>(src))
);
}
/// st.shared - 32b
template <>
CUTLASS_DEVICE
void shared_store<4>(uint32_t ptr, void const *src) {
asm volatile("st.shared.u32 [%0], %1;\n"
: :
"r"(ptr),
"r"(*reinterpret_cast<uint32_t const *>(src))
);
}
/// st.shared - 64b
template <>
CUTLASS_DEVICE
void shared_store<8>(uint32_t ptr, void const *src) {
uint2 const *dst_u64 = reinterpret_cast<uint2 const *>(src);
asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
: :
"r"(ptr),
"r"(dst_u64->x),
"r"(dst_u64->y)
);
}
/// st.shared - 128b
template <>
CUTLASS_DEVICE
void shared_store<16>(uint32_t ptr, void const *src) {
uint4 const *dst_u128 = reinterpret_cast<uint4 const *>(src);
asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
: :
"r"(ptr),
"r"(dst_u128->x),
"r"(dst_u128->y),
"r"(dst_u128->z),
"r"(dst_u128->w)
);
}
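// Example usage (illustrative sketch; smem_int_ptr is assumed to be a 32-bit
// shared-memory address such as one returned by cutlass_get_smem_pointer):
//
//   uint4 staged;
//   cutlass::arch::shared_load<16>(&staged, smem_int_ptr);        // 128-bit ld.shared
//   cutlass::arch::shared_store<16>(smem_int_ptr + 16, &staged);  // 128-bit st.shared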
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "memory_sm75.h"
#include "memory_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| 14,313 | C | 29.134737 | 100 | 0.490603 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/wmma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for warp matrix multiply-add (WMMA) operations
*/
#pragma once
// CUTLASS WMMA does not support clang at present.
#if !(defined(__clang__) && defined(__CUDA__))
#if (__CUDACC_VER_MAJOR__ >= 9)
#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700))
#define CUTLASS_ARCH_WMMA_ENABLED
#define CUTLASS_ARCH_WMMA_SM70_ENABLED
#endif
#endif
#if (__CUDACC_VER_MAJOR__ >= 10)
#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 720))
#define CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED
#define CUTLASS_ARCH_WMMA_SM72_ENABLED
#endif
#endif
#if (__CUDACC_VER_MAJOR__ >= 10)
#if (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 750))
#define CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED
#define CUTLASS_ARCH_WMMA_SM75_ENABLED
#endif
#endif
#endif //!(defined(__clang__) && defined(__CUDA__))
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include <mma.h>
#include "cutlass/arch/mma.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps cutlass data types => nvcuda::wmma data types
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Type_>
struct CutlassToWmmaDataType{
using Type = Type_;
};
/// Statically maps cutlass::half_t => __half
template<>
struct CutlassToWmmaDataType<cutlass::half_t> {
using Type = __half;
};
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDACC_VER_MAJOR__ >= 11)
template<>
struct CutlassToWmmaDataType<cutlass::bfloat16_t> {
using Type = __nv_bfloat16;
};
#endif
/// Statically maps int8_t => char
template<>
struct CutlassToWmmaDataType<int8_t> {
using Type = signed char;
};
/// Statically maps uint8_t => char
template<>
struct CutlassToWmmaDataType<uint8_t> {
using Type = unsigned char;
};
/// Statically maps int32_t => int
template<>
struct CutlassToWmmaDataType<int32_t> {
using Type = int;
};
#if defined(CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED)
/// Statically maps cutlass::int4b_t => experimental::precision::s4
template<>
struct CutlassToWmmaDataType<cutlass::int4b_t> {
using Type = nvcuda::wmma::experimental::precision::s4;
};
/// Statically maps cutlass::uint4b_t => experimental::precision::u4
template<>
struct CutlassToWmmaDataType<cutlass::uint4b_t> {
using Type = nvcuda::wmma::experimental::precision::u4;
};
/// Statically maps cutlass::uint1b_t => experimental::precision::b1
template<>
struct CutlassToWmmaDataType<cutlass::uint1b_t> {
using Type = nvcuda::wmma::experimental::precision::b1;
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps cutlass::layout => nvcuda::wmma layout tags
////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout_>
struct CutlassToWmmaLayout {
};
/// Statically maps cutlass::layout::RowMajor => nvcuda::wmma::row_major layout tags
template <>
struct CutlassToWmmaLayout<cutlass::layout::RowMajor> {
using Layout = nvcuda::wmma::row_major;
static nvcuda::wmma::layout_t const value = nvcuda::wmma::layout_t::mem_row_major;
};
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps cutlass::layout::ColumnMajor => nvcuda::wmma::col_major layout tags
////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct CutlassToWmmaLayout<cutlass::layout::ColumnMajor> {
using Layout = nvcuda::wmma::col_major;
static nvcuda::wmma::layout_t const value = nvcuda::wmma::layout_t::mem_col_major;
};
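//
// Illustrative usage sketch (not part of the original header). The aliases below are
// hypothetical example names showing how these mapping traits can be queried.
//
//   using WmmaElementA = typename CutlassToWmmaDataType<cutlass::half_t>::Type;           // __half
//   using WmmaLayoutA  = typename CutlassToWmmaLayout<cutlass::layout::RowMajor>::Layout;  // nvcuda::wmma::row_major
//
//   // The runtime layout tag is also available, e.g. for nvcuda::wmma::store_matrix_sync:
//   nvcuda::wmma::layout_t const layout_c =
//       CutlassToWmmaLayout<cutlass::layout::RowMajor>::value;                             // mem_row_major
//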
////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps nvcuda::wmma data types => cutlass data types
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Type_>
struct WmmaToCutlassDataType{
using Type = Type_;
};
/// Statically maps __half => cutlass::half_t
template<>
struct WmmaToCutlassDataType<__half> {
using Type = cutlass::half_t;
};
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDACC_VER_MAJOR__ >= 11)
template<>
struct WmmaToCutlassDataType<__nv_bfloat16> {
using Type = cutlass::bfloat16_t;
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// WMMA template structure defines nvcuda::wmma::fragments and static assertion checks
// for a specific template-parameterized data type (Element[A|B|C]), layout (Layout[A|B|C]),
// and native wmma size (Shape)
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< Size of the matrix product (concept: GemmShape)
typename ElementA_, ///< Data type of A elements
typename LayoutA_, ///< Layout of A matrix (concept: MatrixLayout)
typename ElementB_, ///< Data type of B elements
typename LayoutB_, ///< Layout of B matrix (concept: MatrixLayout)
typename ElementC_, ///< Element type of C matrix
  typename LayoutC_,                                ///< Layout of C matrix (concept: MatrixLayout)
typename Operator_ = cutlass::arch::OpMultiplyAdd ///< Inner product operator (multiply-add, xor.popc)
>
struct Wmma;
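//
// Illustrative instantiation sketch (not part of the original header). The 16x16x16
// half-precision configuration below is an assumed example; concrete specializations are
// provided by the per-architecture headers included at the end of this file and typically
// expose nvcuda::wmma fragment types plus an operator() wrapping mma_sync.
//
//   using ExampleWmma = cutlass::arch::Wmma<
//       cutlass::gemm::GemmShape<16, 16, 16>,           // native wmma shape
//       cutlass::half_t, cutlass::layout::RowMajor,     // A
//       cutlass::half_t, cutlass::layout::ColumnMajor,  // B
//       float,           cutlass::layout::RowMajor,     // C
//       cutlass::arch::OpMultiplyAdd>;
//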
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations for each compute capability
//
#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED
#include "cutlass/arch/wmma_sm70.h"
#endif
#ifdef CUTLASS_ARCH_WMMA_SM72_ENABLED
#include "cutlass/arch/wmma_sm72.h"
#endif
#ifdef CUTLASS_ARCH_WMMA_SM75_ENABLED
#include "cutlass/arch/wmma_sm75.h"
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
| 8,473 | C | 36.830357 | 106 | 0.555647 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/mma_sm61.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Matrix multiply-add operations targeting SM61 (dp4a / dp2a integer dot-product instructions)
*/
#pragma once
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutA, typename LayoutB, typename LayoutC>
struct Mma<
gemm::GemmShape<1,1,4>,
1,
int8_t,
LayoutA,
int8_t,
LayoutB,
int,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 4>;
using Operator = OpMultiplyAdd;
using ElementC = int;
CUTLASS_HOST_DEVICE
void operator()(
Array<int, 1> &d,
Array<int8_t, 4> const &a,
Array<int8_t, 4> const &b,
Array<int, 1> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 610))
unsigned const &A = reinterpret_cast<unsigned const &>(a);
unsigned const &B = reinterpret_cast<unsigned const &>(b);
asm volatile("dp4a.s32.s32 %0, %1, %2, %3;"
: "=r"(d[0])
: "r"(A), "r"(B), "r"(c[0]));
#else
d[0] = c[0];
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < 4; ++k) {
d[0] += a[k] * b[k];
}
#endif
}
};
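//
// Illustrative usage sketch (not part of the original header). Array contents are assumed
// example values; on SM61 the call maps to a single dp4a instruction.
//
//   using ExampleMma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<1, 1, 4>, 1,
//       int8_t, cutlass::layout::RowMajor,
//       int8_t, cutlass::layout::ColumnMajor,
//       int,    cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   cutlass::Array<int8_t, 4> a, b;   // filled by the caller
//   cutlass::Array<int, 1>    c, d;
//   ExampleMma mma;
//   mma(d, a, b, c);                  // d[0] = c[0] + sum_k a[k] * b[k]
//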
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutC>
struct Mma<
gemm::GemmShape<1, 1, 2>,
1,
int16_t,
layout::RowMajor,
int16_t,
layout::ColumnMajor,
int,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 1, 2>;
using Operator = OpMultiplyAdd;
using ElementC = int;
CUTLASS_HOST_DEVICE
void operator()(
Array<int, 1> &d,
Array<int16_t, 2> const &a,
Array<int16_t, 2> const &b,
Array<int, 1> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 610))
unsigned const &A = reinterpret_cast<unsigned const &>(a);
unsigned const &B = reinterpret_cast<unsigned const &>(b);
asm volatile("dp2a.s32.s32 %0, %1, %2, %3;"
: "=r"(d[0])
: "r"(A), "r"(B), "r"(c[0]));
#else
d[0] = c[0];
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < 2; ++k) {
d[0] += a[k] * b[k];
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
| 4,193 | C | 28.328671 | 100 | 0.56165 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/cache_operation.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Directives related to cache operations
*/
#pragma once
#include "cutlass/cutlass.h"
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Controls PTX cache operations
struct CacheOperation {
enum Kind {
/// Cache at all levels - accessed again
Always,
/// Cache at global level
Global,
/// Streaming - likely to be accessed once
Streaming,
/// Indicates the line will not be used again
LastUse,
/// Don't cache, and fetch again
Volatile,
/// Write back at all coherent levels
WriteBack,
/// Write through to system memory
WriteThrough
};
};
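//
// Illustrative usage sketch (not part of the original header). CacheOperation::Kind is
// normally consumed as a non-type template parameter by memory helpers (for example the
// asynchronous copy utilities); the policy struct below is a hypothetical example.
//
//   template <cutlass::arch::CacheOperation::Kind kCacheOp>
//   struct ExampleLoadPolicy {
//     static bool const kStreaming = (kCacheOp == cutlass::arch::CacheOperation::Streaming);
//   };
//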
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 2,691 | C | 39.179104 | 100 | 0.629877 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/memory_sm75.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Architecture-specific operators on memory added for SM75
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Layout of destination matrix (column-major implies transpose)
typename Layout,
/// .x1, .x2, or .x4
int MatrixCount
>
inline __device__ void ldsm(Array<unsigned, MatrixCount> & D, void const* ptr);
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Determine the appropriate way to target PTX's "ldmatrix" instruction.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2) || (__CUDACC_VER_MAJOR__ >= 11)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750)
#define CUDA_LDMATRIX_ACTIVATED 1
#endif
#define CUDA_LDMATRIX_SUPPORTED 1
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/*
#if ! defined(CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED) && (__CUDACC_VER_MAJOR__ > 10)
#define CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED 1
#endif
#if ! defined(CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED)
#define CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED ((__CUDACC_VER_MAJOR__ == 10) && (__CUDACC_VER_MINOR__ >= 1))
#endif
#if ! defined(CUDA_NVVM_GET_SMEM_POINTER_ENABLED)
#define CUDA_NVVM_GET_SMEM_POINTER_ENABLED CUDA_NVVM_GET_SMEM_POINTER_SUPPORTED
#endif
*/
#if (! defined (__clang__) && __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)
extern "C" {
//
// This NVVM intrinsic is subject to change in future versions of CUDA.
// Clients should not call it directly. Rather, they should use the
// cutlass::arch::ldsm<>() template.
//
__device__ uint32_t __nvvm_get_smem_pointer(void *);
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void *ptr) {
  // Prefer the CVTA intrinsic when it is available; otherwise fall back to the older internal
  // intrinsic or, failing that, to inline PTX.
#if (! defined (__clang__) && defined(__CUDA_ARCH__) && __CUDACC_VER_MAJOR__ >= 11)
//
// This NVVM intrinsic converts an address in shared memory to a plain
// unsigned integer. This is necessary to pass to shared memory instructions
// in inline PTX.
//
// In CUDA 11 and beyond, this replaces __nvvm_get_smem_pointer() [only available in 10.2].
//
//__device__ size_t __cvta_generic_to_shared(void* ptr);
return static_cast<unsigned>(__cvta_generic_to_shared(ptr));
#elif (! defined (__clang__) && defined(__CUDA_ARCH__) && __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2)
return __nvvm_get_smem_pointer(ptr);
#elif defined(__CUDA_ARCH__)
uint32_t smem_ptr;
asm(
"{ .reg .u64 smem_ptr; cvta.to.shared.u64 smem_ptr, %1; cvt.u32.u64 %0, smem_ptr; }\n"
: "=r"(smem_ptr) : "l"(ptr));
return smem_ptr;
#else
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
return 0;
#endif
}
/// CUTLASS helper to get SMEM pointer
inline __device__ unsigned cutlass_get_smem_pointer(void const *ptr) {
return cutlass_get_smem_pointer(const_cast<void *>(ptr));
}
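//
// Illustrative usage sketch (not part of the original header): converting a generic shared
// memory pointer into the 32-bit .shared address expected by inline PTX.
//
//   __shared__ int smem_buffer[32];
//   unsigned smem_addr = cutlass::arch::cutlass_get_smem_pointer(&smem_buffer[threadIdx.x % 32]);
//   int value;
//   asm volatile("ld.shared.b32 %0, [%1];" : "=r"(value) : "r"(smem_addr));
//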
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 1>(
Array<unsigned, 1> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x;
asm volatile ("ldmatrix.sync.aligned.x1.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr));
reinterpret_cast<int &>(D) = x;
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 2>(
Array<unsigned, 2> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y;
asm volatile ("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr));
reinterpret_cast<int2 &>(D) = make_int2(x, y);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::RowMajor, 4>(
Array<unsigned, 4> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y, z, w;
asm volatile ("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr));
reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
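//
// Illustrative usage sketch (not part of the original header): loading four 8x8 b16 matrices
// with the x4 form above. The buffer size and per-lane addressing are assumed example values;
// real iterators compute swizzled per-lane pointers.
//
//   __shared__ alignas(16) cutlass::half_t smem_tile[8 * 64];
//   cutlass::Array<unsigned, 4> frag;
//   // Each of the 32 lanes supplies the address of one 8-element row.
//   cutlass::arch::ldsm<cutlass::layout::RowMajor, 4>(frag, smem_tile + (threadIdx.x % 32) * 8);
//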
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Transpose on 16b granularity
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 1>(
Array<unsigned, 1> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x;
asm volatile ("ldmatrix.sync.aligned.x1.trans.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr));
reinterpret_cast<int &>(D) = x;
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 2>(
Array<unsigned, 2> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y;
asm volatile ("ldmatrix.sync.aligned.x2.trans.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr));
reinterpret_cast<int2 &>(D) = make_int2(x, y);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
inline __device__ void ldsm<layout::ColumnMajor, 4>(
Array<unsigned, 4> & D,
void const* ptr) {
#if defined(CUDA_LDMATRIX_ACTIVATED)
unsigned addr = cutlass_get_smem_pointer(ptr);
int x, y, z, w;
asm volatile ("ldmatrix.sync.aligned.x4.trans.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr));
reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w);
#else
CUTLASS_UNUSED(D);
CUTLASS_UNUSED(ptr);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType, int Bytes>
struct shared_load_op {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
D = *reinterpret_cast<AccessType const *>(ptr);
}
};
template <typename AccessType>
CUTLASS_DEVICE void shared_load(AccessType &D, void const *ptr) {
shared_load_op<AccessType, int(sizeof(AccessType))>(D, ptr);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct shared_load_op<AccessType, 16> {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
unsigned addr = cutlass_get_smem_pointer(ptr);
uint4 v;
asm volatile ("ld.shared.v4.b32 {%0, %1, %2, %3}, [%4];" :
"=r"(v.x), "=r"(v.y), "=r"(v.z), "=r"(v.w) : "r"(addr));
D = reinterpret_cast<AccessType const &>(v);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename AccessType>
struct shared_load_op<AccessType, 8> {
CUTLASS_DEVICE
shared_load_op(AccessType &D, void const *ptr) {
unsigned addr = cutlass_get_smem_pointer(ptr);
uint2 v;
asm volatile ("ld.shared.v2.b32 {%0, %1}, [%2];" :
"=r"(v.x), "=r"(v.y) : "r"(addr));
D = reinterpret_cast<AccessType const &>(v);
}
};
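//
// Illustrative usage sketch (not part of the original header): shared_load dispatches to the
// widest available ld.shared variant based on sizeof(AccessType).
//
//   __shared__ alignas(16) float smem[64];
//   cutlass::Array<float, 4> fragment;                               // 16 bytes -> v4.b32 path
//   cutlass::arch::shared_load(fragment, smem + 4 * (threadIdx.x % 16));
//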
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 10,490 | C | 29.855882 | 141 | 0.545472 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/arch/simd_sm61.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing SIMD operators for SM61
*/
#pragma once
#include "simd.h"
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Dot product operator - specialized for int32_t <- (int8_t * int8_t) x 4 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<int8_t, 4> const &a, Array<int8_t, 4> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint8_t * int8_t) x 4 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<uint8_t, 4> const &a, Array<int8_t, 4> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (int8_t * uint8_t) x 4 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<int8_t, 4> const &a, Array<uint8_t, 4> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint8_t * uint8_t) x 4 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<uint8_t, 4> const &a, Array<uint8_t, 4> const &b, int32_t accum) {
return accum;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Dot product operator - specialized for int32_t <- (int16_t * int8_t) x 2 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<int16_t, 2> const &a, Array<int8_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint16_t * int8_t) x 2 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<uint16_t, 2> const &a, Array<int8_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (int16_t * int8_t) x 2 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<int16_t, 2> const &a, Array<uint8_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint16_t * int8_t) x 2 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<uint16_t, 2> const &a, Array<uint8_t, 2> const &b, int32_t accum) {
return accum;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Dot product operator - specialized for int32_t <- (int16_t * int16_t) x 2 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<int16_t, 2> const &a, Array<int16_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint16_t * int16_t) x 2 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<uint16_t, 2> const &a, Array<int16_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (int16_t * int16_t) x 2 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<int16_t, 2> const &a, Array<uint16_t, 2> const &b, int32_t accum) {
return accum;
}
/// Dot product operator - specialized for int32_t <- (uint16_t * int16_t) x 2 + int32_t
CUTLASS_HOST_DEVICE
template <>
int32_t dot(Array<uint16_t, 2> const &a, Array<uint16_t, 2> const &b, int32_t accum) {
return accum;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
| 5,102 | C | 33.47973 | 100 | 0.624265 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/pitch_linear_thread_map.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing how threads are mapped to a given tile.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
////////////////////////////////////////////////////////////////////////////////
/// Strip-mines a pitch-linear tile among a given number of threads, first along
/// the contiguous dimension then along the strided dimension.
///
/// The tile must be divisible by the thread count such that all threads may
/// execute the same number of iterations with the same delta to exhaustively
/// cover the tile.
///
/// This class satisfies the "RegularThreadMapping" concept.
///
/// This ThreadMap is used by SIMT kernels and operand E of the sparse tensor
/// kernels.
template <
typename Shape_,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearStripminedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal implementation details
struct Detail {
    static_assert(!(Shape::kContiguous % kElementsPerAccess), "Shape must be divisible by vector length.");
/// Shape of the tile in units of vectors
using ShapeVec = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
static_assert((Threads < ShapeVec::kContiguous && !(ShapeVec::kContiguous % kThreads)) ||
(!(kThreads % ShapeVec::kContiguous)),
"Shape must be divisible by number of iterations of each thread.");
};
/// Number of iterations by each thread
using Iterations = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
// Redo the comparison here to work around divide by zero compiler
      // error. The compiler evaluates both paths of platform::conditional.
(Threads >= Detail::ShapeVec::kContiguous
? (Detail::ShapeVec::kStrided + (kThreads / Detail::ShapeVec::kContiguous - 1)) /
(kThreads / Detail::ShapeVec::kContiguous)
: 0)>,
layout::PitchLinearShape<Detail::ShapeVec::kContiguous / kThreads,
Detail::ShapeVec::kStrided>>::type;
/// Interval between accesses along each dimension of the tensor's logical coordinate space
/// (in units of Elements)
using Delta = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
kThreads / Detail::ShapeVec::kContiguous
>,
layout::PitchLinearShape<
kThreads * kElementsPerAccess,
1
>
>::type;
  /// Shape of the storage tile (in units of elements)
using StorageShape = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<Shape::kContiguous,
Iterations::kStrided*(kThreads / Detail::ShapeVec::kContiguous)>,
layout::PitchLinearShape<Shape::kContiguous, Shape::kStrided>>::type;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
/// (in units of Elements)
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
return TensorCoord(
(thread_id % Detail::ShapeVec::kContiguous) * kElementsPerAccess,
thread_id / Detail::ShapeVec::kContiguous);
}
};
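//
// Illustrative worked example (not part of the original header). The tile shape, thread count
// and vector width below are assumed example values.
//
//   using ExampleMap = cutlass::transform::PitchLinearStripminedThreadMap<
//       cutlass::layout::PitchLinearShape<64, 8>,   // 64 contiguous x 8 strided elements
//       32,                                         // threads
//       4>;                                         // elements per access
//
//   // ShapeVec is 16 x 8 vectors, so the 32 threads cover two strided rows per step:
//   //   Iterations = {1, 4}, Delta = {1, 2}, and thread t starts at ((t % 16) * 4, t / 16).
//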
/// This ThreadMap is used by GEMV
template <
typename Shape,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearTilePolicyStripminedThreadContiguous
{
static_assert((Shape::kContiguous % (Threads * ElementsPerAccess)) == 0,
"Contiguous shape must divide number of threads");
using TensorCoord = layout::PitchLinearCoord;
static int const kThreads = Threads;
static int const kElementsPerAccess = ElementsPerAccess;
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / (kThreads * kElementsPerAccess),
Shape::kStrided>;
using Delta = layout::PitchLinearShape<1, 1>;
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id)
{
return TensorCoord(thread_id * Iterations::kContiguous * kElementsPerAccess, 0);
}
};
template <
typename Shape,
int Threads,
int ElementsPerAccess = 1
>
struct PitchLinearTilePolicyStripminedThreadStrided
{
static_assert((Shape::kStrided % Threads == 0),
"Strided shape must divide number of threads");
using TensorCoord = layout::PitchLinearCoord;
static int const kThreads = Threads;
static int const kElementsPerAccess = ElementsPerAccess;
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided / kThreads>;
using Delta = layout::PitchLinearShape<1, 1>;
using ShapeVec = Shape;
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id)
{
return TensorCoord(0, thread_id * Iterations::kStrided);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-raked arrangement in which a shape is partitioned into contiguous
/// elements.
///
/// This ThreadMap is used by tensor core kernels.
template <
typename Shape_,
int Threads,
typename WarpThreadArrangement_,
int ElementsPerAccess = 1
>
struct PitchLinearWarpRakedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(
!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Compute the 'shape' of the overall tile in units of vectors
using ShapeInAccesses = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
static_assert(
!(ShapeInAccesses::kContiguous % WarpThreadArrangement::kContiguous),
"ShapeInAccesses must be divisible by WarpThreadArrangement.");
static_assert(
!(ShapeInAccesses::kStrided % WarpThreadArrangement::kStrided),
"ShapeInAccesses must be divisible by WarpThreadArrangement.");
// compute number of warp-level accesses total
using WarpAccessIterations = layout::PitchLinearShape<
ShapeInAccesses::kContiguous / WarpThreadArrangement::kContiguous,
ShapeInAccesses::kStrided / WarpThreadArrangement::kStrided
>;
// Divide it into the number of warps, first partitioning the strided dimension then the
// contiguous.
static int const kWarpsStrided =
(WarpAccessIterations::kStrided >= kWarpCount
? kWarpCount
: WarpAccessIterations::kStrided);
static int const kWarpsContiguous =
(kWarpCount > WarpAccessIterations::kStrided
? kWarpCount / kWarpsStrided
: 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = layout::PitchLinearShape<
Detail::WarpThreadArrangement::kContiguous * kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided
>;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
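//
// Illustrative worked example (not part of the original header). The tile shape, thread count,
// warp arrangement and vector width below are assumed example values.
//
//   using ExampleMap = cutlass::transform::PitchLinearWarpRakedThreadMap<
//       cutlass::layout::PitchLinearShape<128, 32>,  // tile in elements
//       128,                                         // 4 warps
//       cutlass::layout::PitchLinearShape<8, 4>,     // 8 x 4 threads per warp
//       8>;                                          // elements per access
//
//   // ShapeInAccesses = {16, 32} and WarpAccessIterations = {2, 8}; the four warps are
//   // stacked along the strided dimension, giving Iterations = {2, 2} and Delta = {64, 4}.
//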
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-raked arrangement in which a shape is partitioned into contiguous
/// elements. Warps are arranged based on a stride.
///
/// This ThreadMap is used by tensor core kernels for NCxHWx layout.
template <
typename Shape_,
int Threads,
typename WarpThreadArrangement_,
int ElementsPerAccess = 1
>
struct PitchLinearStridedWarpRakedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
using WarpThreadArrangement = WarpThreadArrangement_;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Base ThreadMap
using BaseThreadMap = PitchLinearWarpRakedThreadMap<
Shape,
kThreads,
WarpThreadArrangement,
kElementsPerAccess
>;
/// Shape of access by each thread
using ThreadAccessShape = typename BaseThreadMap::ThreadAccessShape;
struct Detail {
using WarpThreadArrangement = WarpThreadArrangement_;
using WarpAccessIterations = typename BaseThreadMap::Detail::WarpAccessIterations;
static int const kWarpSize = BaseThreadMap::Detail::kWarpSize;
static int const kWarpCount = BaseThreadMap::Detail::kWarpCount;
using ShapeInAccesses = typename BaseThreadMap::Detail::ShapeInAccesses;
// Divide it into the number of warps, first partitioning the contiguous dimension then the
    // strided dimension.
static int const kWarpsContiguous =
(WarpAccessIterations::kContiguous >= kWarpCount
? kWarpCount
: WarpAccessIterations::kContiguous);
static int const kWarpsStrided =
(kWarpCount > WarpAccessIterations::kContiguous
? kWarpCount / kWarpsContiguous
: 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = typename BaseThreadMap::Delta;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Transpose the existing ThreadMap. For example, interleaved layout is like
/// congruous in the global memory and crosswise in the shared memory. We need
/// to transpose the coordinates between two.
template <typename ThreadMap_, typename WarpThreadArrangement_>
struct TransposePitchLinearThreadMap {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement =
layout::PitchLinearShape<ThreadMap::Detail::kWarpsStrided,
ThreadMap::Detail::kWarpsContiguous>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kContiguous == 1,
"Contiguous iteration has to be one to reuse the same shared store function with those that don't need transpose");
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<Detail::WarpThreadArrangement::kContiguous *
kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided>;
/// Maps thread ID to a coordinate offset within the tensor's logical
/// coordinate space Note this is slightly different from the one of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access
// (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided * Iterations::kStrided};
// This is the offset of a specific warp (in units of vectors)
// Note the order of / and %. Also the 2nd operand is kStrided.
layout::PitchLinearCoord warp_offset{
(warp_id / Detail::WarpArrangement::kStrided),
(warp_id % Detail::WarpArrangement::kStrided)};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous};
// This is the offset of a thread within a threadblock tile (units of
// vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of
// elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()};
return thread_offset_in_threadblock_tile_base;
}
};
template <typename ThreadMap_>
struct TransposePitchLinearThreadMapSimt {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static_assert(kElementsPerAccess == 1 , "Simt transpose requires elements per access to be 1");
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
static_assert(Iterations::kStrided == 1,
"Strided iteration has to be one to reuse the same shared store function with those that don't need transpose");
/// Shape of access by each thread
using ThreadAccessShape = typename ThreadMap::ThreadAccessShape;
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<ThreadMap::Delta::kStrided,
ThreadMap::Delta::kContiguous>;
/// Maps thread ID to a coordinate offset within the tensor's logical
/// coordinate space Note this is slightly different from the one of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
TensorCoord coord = ThreadMap::initial_offset(thread_id);
return TensorCoord(
coord.strided(),
coord.contiguous()
);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Policy defining a warp-striped arrangement. This partitions a tile into vectorized memory
/// accesses performed by each warp then distributes warps across them. Warps are striped in the
/// strided dimension and raked across the contiguous dimension.
template <
typename Shape_, /// Overall shape to partition in units of elements
  int Threads,                          /// Number of participating threads
typename WarpThreadArrangement_, /// Describes the shape of one memory access per warp
int ElementsPerAccess = 1 /// Number of elements accessed by each thread per memory operation (i.e. vector size)
>
struct PitchLinearWarpStripedThreadMap {
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Number of threads total
static int const kThreads = Threads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ElementsPerAccess;
/// Shape of access by each thread
using ThreadAccessShape = layout::PitchLinearShape<kElementsPerAccess, 1>;
/// Internal details made public to facilitate introspection
struct Detail {
/// Fixed arrangement of threads within a warp (units of threads).
using WarpThreadArrangement = WarpThreadArrangement_;
/// Number of threads per warp
static int const kWarpSize = WarpThreadArrangement::kCount;
/// Number of participating warps
static int const kWarpCount = kThreads / kWarpSize;
static_assert(
!(Shape::kContiguous % kElementsPerAccess),
"Shape must be divisible by vector length.");
/// Compute the 'shape' of the overall tile in units of vectors
using ShapeInAccesses = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess,
Shape::kStrided
>;
// compute number of warp-level accesses total
using WarpAccessIterations = layout::PitchLinearShape<
ShapeInAccesses::kContiguous / WarpThreadArrangement::kContiguous,
ShapeInAccesses::kStrided / WarpThreadArrangement::kStrided
>;
// Divide it into the number of warps, first partitioning the strided dimension then the
// contiguous.
static int const kWarpsStrided =
(WarpAccessIterations::kStrided >= kWarpCount
? kWarpCount : (kWarpCount / WarpAccessIterations::kStrided));
static int const kWarpsContiguous =
(kWarpCount > WarpAccessIterations::kStrided ?
WarpAccessIterations::kContiguous / kWarpsStrided : 1);
/// Arrangement of warps within a threadblock-scoped tile
using WarpArrangement = layout::PitchLinearShape<
kWarpsContiguous, kWarpsStrided
>;
};
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations = layout::PitchLinearShape<
Detail::WarpAccessIterations::kContiguous / Detail::kWarpsContiguous,
Detail::WarpAccessIterations::kStrided / Detail::kWarpsStrided
>;
static_assert(Iterations::kCount,
"Number of iterations must be non-zero");
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta = layout::PitchLinearShape<
Detail::WarpThreadArrangement::kContiguous * kElementsPerAccess,
Detail::WarpThreadArrangement::kStrided * Detail::WarpArrangement::kStrided
>;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
int warp_id = (thread_id / Detail::kWarpSize);
int lane_id = (thread_id % Detail::kWarpSize);
//
// compute warp-level offset
//
// This is the shape of the entire area covered by a warp's memory access (in units of vectors)
layout::PitchLinearCoord warp_footprint{
Detail::WarpThreadArrangement::kContiguous * Iterations::kContiguous,
Detail::WarpThreadArrangement::kStrided
};
// This is the offset of a specific warp (in units of vectors)
layout::PitchLinearCoord warp_offset{
(warp_id % Detail::kWarpsContiguous),
(warp_id / Detail::kWarpsContiguous)
};
// This is the offset of a specific thread within a warp (units of vectors)
layout::PitchLinearCoord thread_offset_in_warp{
lane_id % Detail::WarpThreadArrangement::kContiguous,
lane_id / Detail::WarpThreadArrangement::kContiguous
};
// This is the offset of a thread within a threadblock tile (units of vectors)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_vec =
warp_footprint * warp_offset + thread_offset_in_warp;
// This is the offset of a thread within a threadblock tile (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile_base{
thread_offset_in_threadblock_tile_vec.contiguous() * kElementsPerAccess,
thread_offset_in_threadblock_tile_vec.strided()
};
return thread_offset_in_threadblock_tile_base;
}
};
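//
// Illustrative worked example (not part of the original header), using the same assumed
// parameters as the warp-raked sketch earlier in this file.
//
//   using ExampleStripedMap = cutlass::transform::PitchLinearWarpStripedThreadMap<
//       cutlass::layout::PitchLinearShape<128, 32>,  // tile in elements
//       128,                                         // 4 warps
//       cutlass::layout::PitchLinearShape<8, 4>,     // 8 x 4 threads per warp
//       8>;                                          // elements per access
//
//   // Iterations = {2, 2} as in the raked map, but Delta = {64, 16}: between strided
//   // iterations a warp steps over the rows visited by the other warps.
//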
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Strip-mines a pitch-linear tile among a given number of threads, first along the contiguous
/// dimension then along the strided dimension, while each thread accesses a 2D thread-tile.
///
/// The tile must be divisible by the thread count such that all threads may execute the same
/// number of iterations with the same delta to exhaustively cover the tile.
///
/// This class satisfies the "RegularThreadMapping" concept.
template <
typename Shape_,
int Threads,
typename ThreadTileShape
>
struct PitchLinear2DThreadTileStripminedThreadMap;
template <
typename Shape_,
int Threads
>
struct PitchLinear2DThreadTileStripminedThreadMap <Shape_, Threads, cutlass::layout::PitchLinearShape<4, 4>>{
/// Tensor coordinate
using TensorCoord = layout::PitchLinearCoord;
/// Tile shape
using Shape = Shape_;
/// Access Shape of each thread
using ThreadAccessShape = cutlass::layout::PitchLinearShape<4, 4>;
//using ThreadAccessShape = ThreadTileShape;
/// Number of threads total
static int const kThreads = Threads;
/// Extract length of each access from Layout
static int const kElementsPerAccess = ThreadAccessShape::kContiguous;
  static_assert(!(kElementsPerAccess % 4), "kElementsPerAccess needs to be a multiple of 4 (32 bits)");
/// Internal implementation details
struct Detail {
    static_assert(!(ThreadAccessShape::kContiguous % 4), "ThreadAccessShape::kContiguous needs to be a multiple of 4");
    static_assert(!(Shape::kContiguous % ThreadAccessShape::kContiguous),
      "Shape must be divisible by the thread access shape.");
    static_assert(!((Shape::kContiguous * Shape::kStrided) % (kThreads * ThreadAccessShape::kCount)),
      "Shape must be divisible by thread count * accesses per thread.");
/// Shape of the tile in units of vectors
using ShapeVec = layout::PitchLinearShape<
Shape::kContiguous / ThreadAccessShape::kContiguous,
Shape::kStrided / ThreadAccessShape::kStrided
>;
static_assert(
(Threads < ShapeVec::kContiguous && !(ShapeVec::kContiguous % kThreads)) ||
(!(kThreads % ShapeVec::kContiguous) && !(ShapeVec::kStrided % (kThreads / ShapeVec::kContiguous))),
"Shape must be divisible by number of iterations of each thread."
);
};
/// Number of iterations by each thread
using Iterations = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
1,
// Redo the comparison here to work around divide by zero compiler
          // error. The compiler evaluates both paths of platform::conditional.
(Threads >= Detail::ShapeVec::kContiguous
? Detail::ShapeVec::kStrided /
(kThreads / Detail::ShapeVec::kContiguous)
: 0)>,
layout::PitchLinearShape<Detail::ShapeVec::kContiguous / kThreads,
Detail::ShapeVec::kStrided>>::type;
/// Interval between accesses along each dimension of the tensor's logical coordinate space
/// (in units of Elements)
using Delta = typename platform::conditional<
Threads >= Detail::ShapeVec::kContiguous,
layout::PitchLinearShape<
Shape::kContiguous,
kThreads * ThreadAccessShape::kStrided / Detail::ShapeVec::kContiguous
>,
layout::PitchLinearShape<
kThreads * ThreadAccessShape::kContiguous,
1
>
>::type;
/// Maps thread ID to a coordinate offset within the tensor's logical coordinate space
/// (in units of Elements)
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
return TensorCoord(
(thread_id % Detail::ShapeVec::kContiguous) * ThreadAccessShape::kContiguous,
(thread_id / Detail::ShapeVec::kContiguous) * ThreadAccessShape::kStrided);
}
};
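//
// Illustrative worked example (not part of the original header). The tile shape and thread
// count below are assumed example values.
//
//   using ExampleMap = cutlass::transform::PitchLinear2DThreadTileStripminedThreadMap<
//       cutlass::layout::PitchLinearShape<64, 32>,   // tile in elements
//       32,                                          // threads
//       cutlass::layout::PitchLinearShape<4, 4>>;    // 4x4 thread tile
//
//   // ShapeVec = {16, 8} thread tiles, Iterations = {1, 4}, Delta = {64, 8}, and thread t
//   // starts at ((t % 16) * 4, (t / 16) * 4).
//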
/// Thread map presenting a 2D thread-tiled mapping as a transposed PitchLinear2DThreadTile mapping
template <typename ThreadMap_>
struct TransposePitchLinearThreadMap2DThreadTile {
/// Underlying ThreadMap
using ThreadMap = ThreadMap_;
/// Tensor coordinate
using TensorCoord = typename ThreadMap::TensorCoord;
/// Tile shape
using Shape = typename ThreadMap::Shape;
/// Number of threads total
static int const kThreads = ThreadMap::kThreads;
/// Extract vector length from Layout
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static_assert(kElementsPerAccess > 1, "2D thread tile transpose requires elements per access to be greater than 1");
///< Iterations along each dimension (concept: PitchLinearShape)
using Iterations =
layout::PitchLinearShape<ThreadMap::Iterations::kStrided,
ThreadMap::Iterations::kContiguous>;
static_assert(Iterations::kCount, "Number of iterations must be non-zero");
/// Shape of access by each thread
using ThreadAccessShape = typename ThreadMap::ThreadAccessShape;
  ///< Delta between accesses (units of elements, concept: PitchLinearShape)
using Delta =
layout::PitchLinearShape<ThreadMap::Delta::kStrided,
ThreadMap::Delta::kContiguous>;
/// Maps thread ID to a coordinate offset within the tensor's logical
/// coordinate space Note this is slightly different from the one of
/// PitchLinearWarpRakedThreadMap.
CUTLASS_HOST_DEVICE
static TensorCoord initial_offset(int thread_id) {
TensorCoord coord = ThreadMap::initial_offset(thread_id);
return TensorCoord(
coord.strided(),
coord.contiguous()
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 33,392 | C | 35.022654 | 130 | 0.68765 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/warp/vector_fragment_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of a warp vector
that participate in one warp-level mma operation.
    Typically, this is used to access the scale/bias fragment of a warp-level mma operation.
The scale/bias vector is then partitioned into smaller fragments that can be fed into
next warp-level mma operation.
This iterator is necessary to accomplish warp-level mma fusion where the scale/bias vector is
applied to the multiplicand for the next mma.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace transform {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the input fragment tile shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
  /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator;
// Partial specialization for PitchLinear layout tile
template <
/// Size of the input fragment vector shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
  /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator<Shape_, Element_,
cutlass::layout::PitchLinear,
InstructionShape_, ElementsPerAccess> {
public:
/// Size of the input threadblock tile shape (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::PitchLinear;
  /// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Number of participating threads
static int const kThreads = 32;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kRowsPerIteration = 8;
static int const kColumnsPerAccess = 8;
static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kK / kThreads;
static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess;
/// Number of iterations
using Iterations = MatrixShape<InstructionShape::kM / kRowsPerIteration, Shape::kContiguous / kElementsPerIteration>;
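  // Worked example (illustrative, with assumed parameters): for
  // InstructionShape = GemmShape<16, 8, 16> and ElementsPerAccess = 4,
  // kElementsPerIteration = 8 * 16 / 32 = 4, kAccessPerIteration = 4 / 4 = 1,
  // and Iterations = <16 / 8, Shape::kContiguous / 4> = <2, Shape::kContiguous / 4>.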
public:
//
// Derived quantities
//
// All fragments have kElementsPerAccess scale followed by bias
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one iteration of the iterator.
using Fragment = Array<Element, kElementsPerIteration * Iterations::kRow>;
/// Input threadblock fragment tile
using ThreadblockFragment = Array<Element, Shape::kContiguous >;
private:
/// Internal access type
using AccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Input threadblock fragment tile
AccessType const *iterator_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
VectorFragmentIterator(ThreadblockFragment const &threadblock_frag)
: iterator_(reinterpret_cast<AccessType const *>(&threadblock_frag)),
index_(0) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(index_ >= Iterations::kColumn)
index_ = 0;
}
/// Increments
CUTLASS_HOST_DEVICE
VectorFragmentIterator &operator++() {
add_offset(1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
index_ = idx;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int r = 0; r < Iterations::kRow; r++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kAccessPerIteration; i++) {
frag_ptr[i * Iterations::kRow + r].clear();
frag_ptr[i * Iterations::kRow + r] = iterator_[index_ * kAccessPerIteration + i];
}
}
}
};
// Partial specialization for Row-Major layout tile
template <
/// Size of the input fragment tile shape (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
  /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
//// Number of elements per access when loading fragment
int ElementsPerAccess>
class VectorFragmentIterator<Shape_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, ElementsPerAccess> {
public:
/// Size of the input threadblock tile shape (concept: MatrixShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
  /// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Underlying iterator
using Base = VectorFragmentIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, InstructionShape, ElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one iteration of the iterator.
using Fragment = typename Base::Fragment;
/// Input threadblock fragment tile
using ThreadblockFragment = typename Base::ThreadblockFragment;
private:
/// Underlying iterator
Base iterator_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
VectorFragmentIterator(ThreadblockFragment const &threadblock_frag)
: iterator_(threadblock_frag) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
iterator_.add_offset(index_offset);
}
/// Increments
CUTLASS_HOST_DEVICE
VectorFragmentIterator &operator++() {
add_offset(1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
iterator_.set_index(idx);
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
};
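////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage (a minimal sketch; the shapes, element type and fragment
// names below are assumptions chosen only for the example and would normally
// come from the enclosing warp-level mma configuration):
//
//   using Iterator = cutlass::transform::warp::VectorFragmentIterator<
//       cutlass::MatrixShape<1, 64>,           // per-warp scale/bias tile (assumed)
//       cutlass::half_t,                       // element type (assumed)
//       cutlass::layout::RowMajor,             // layout of the vector
//       cutlass::gemm::GemmShape<16, 8, 16>,   // instruction shape (assumed)
//       4>;                                    // elements per access (assumed)
//
//   typename Iterator::ThreadblockFragment tb_frag;  // filled by an earlier load
//   Iterator iter(tb_frag);
//   typename Iterator::Fragment frag;
//   iter.load(frag);   // fragment feeding the current warp-level mma
//   ++iter;            // advance to the fragment for the next mma step
//
////////////////////////////////////////////////////////////////////////////////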
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 8,828 | C | 30.088028 | 119 | 0.67014 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/thread/unary_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
namespace cutlass {
namespace transform {
namespace thread {
namespace UnaryTransform {
struct Identity; ///< None (i.e., identity)
struct Conjugate; ///< Complex conjugate
}
/// Element-wise unary operator that transforms one element of a fragment at a time
template<
typename FragmentIn, ///< Input Fragment
typename FragmentOut,///< Output Fragment
typename Transform> ///< Unary transform operator
class UnaryOp
{
public:
CUTLASS_DEVICE
static FragmentOut execute(FragmentIn &in)
{
static_assert(FragmentIn::kElements == FragmentOut::kElements, "Number of elements must match.");
static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value ||
platform::is_same<Transform, UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
FragmentOut out;
if (platform::is_same<Transform, UnaryTransform::Identity>::value )
{
CUTLASS_PRAGMA_UNROLL
for (int i=0; i < FragmentIn::kElements; ++i){
out[i] = static_cast<typename FragmentOut::Element>(in[i]);
}
}
else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value )
{
for (int i=0; i < FragmentIn::kElements; ++i){
out[i] = conj(static_cast<typename FragmentOut::Element>(in[i]));
}
}
return out;
}
};
template<typename FragmentIn, typename Transform>
class UnaryOp<FragmentIn, FragmentIn, Transform>
{
public:
CUTLASS_DEVICE
static FragmentIn execute(FragmentIn &in)
{
static_assert(platform::is_same<Transform, UnaryTransform::Identity>::value ||
platform::is_same<Transform, UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
if (platform::is_same<Transform, UnaryTransform::Identity>::value )
{
return in;
}
else if (platform::is_same<Transform, UnaryTransform::Conjugate>::value )
{
for(int i=0; i < FragmentIn::kElements; ++i){
in[i] = conj(in[i]);
}
}
return in;
}
};
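////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage (a minimal sketch; the fragment size of 4 is an arbitrary
// assumption). In device code:
//
//   using Fragment = cutlass::Array<cutlass::complex<float>, 4>;
//   using Conj     = UnaryOp<Fragment, Fragment, UnaryTransform::Conjugate>;
//
//   Fragment frag;                 // assume it was filled by an earlier load
//   frag = Conj::execute(frag);    // negates the imaginary part of each element
//
// With UnaryTransform::Identity the same call is a simple pass-through.
//
////////////////////////////////////////////////////////////////////////////////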
}
}
}
| 4,309 | C | 39.660377 | 109 | 0.613832 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/thread/transpose.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic copy routines for tensor views
*/
#pragma once
namespace cutlass {
namespace transform {
namespace thread {
/// Transforms a fragment by doing a transpose
template <
int ElementCount,
typename TransposeShape,
typename Element
> struct Transpose;
/// Specialization for int8_t 4x4 transpose
template <int ElementCount_>
struct Transpose<ElementCount_, layout::PitchLinearShape<4,4> , int8_t> {
static const int kElementCount = ElementCount_;
using TransposeShape = layout::PitchLinearShape<4,4>;
using Element = int8_t;
using Fragment = cutlass::Array<Element, kElementCount>;
static_assert(!(kElementCount % TransposeShape::kCount), "Shape needs to be multiple of 16 elements to do a 4x4 transpose");
CUTLASS_DEVICE
void transform(Fragment& dst, Fragment& src) {
// Expose src/dst as int arrays.
int* src_int = reinterpret_cast<int*>(&src);
int* dst_int = reinterpret_cast<int*>(&dst);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementCount / TransposeShape::kCount; i++){
int const i0 = 4 * i + 0;
int const i1 = 4 * i + 1;
int const i2 = 4 * i + 2;
int const i3 = 4 * i + 3;
int a0 = src_int[i0];
int a1 = src_int[i1];
int a2 = src_int[i2];
int a3 = src_int[i3];
int b0, b1, b2, b3, c0;
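      // __byte_perm(x, y, s) picks one byte per hex digit of s, low digit
      // first, from the pool {x bytes 0-3, y bytes 4-7}. The selectors
      // 0x0040 / 0x0051 / 0x0062 / 0x0073 gather byte j (j = 0..3) of a0 and a1
      // into the two low bytes, and the 0x5410 merge appends byte j of a2 and
      // a3, so each b_j holds byte j of {a0, a1, a2, a3} - the rows of the
      // transposed 4x4 int8 block.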
b0 = __byte_perm(a0, a1, 0x0040);
c0 = __byte_perm(a2, a3, 0x0040);
b0 = __byte_perm(b0, c0, 0x5410);
b1 = __byte_perm(a0, a1, 0x0051);
c0 = __byte_perm(a2, a3, 0x0051);
b1 = __byte_perm(b1, c0, 0x5410);
b2 = __byte_perm(a0, a1, 0x0062);
c0 = __byte_perm(a2, a3, 0x0062);
b2 = __byte_perm(b2, c0, 0x5410);
b3 = __byte_perm(a0, a1, 0x0073);
c0 = __byte_perm(a2, a3, 0x0073);
b3 = __byte_perm(b3, c0, 0x5410);
dst_int[i0] = b0;
dst_int[i1] = b1;
dst_int[i2] = b2;
dst_int[i3] = b3;
}
}
};
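////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage (a minimal sketch, assuming a 16-element int8_t fragment
// transposed as a single 4x4 block inside device code):
//
//   using Transpose4x4 = Transpose<16, layout::PitchLinearShape<4, 4>, int8_t>;
//
//   Transpose4x4::Fragment src;   // assume it was filled by an earlier load
//   Transpose4x4::Fragment dst;
//   Transpose4x4 op;
//   op.transform(dst, src);       // dst now holds the 4x4-transposed bytes
//
////////////////////////////////////////////////////////////////////////////////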
} // namespace thread
} // namespace transform
} // namespace cutlass
| 3,835 | C | 34.518518 | 128 | 0.641982 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of tiles
from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last
"residue" tile first, with the objective of minimizing predicate mask updates
during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIterator2dThreadTile
///
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap, typename AccessType>
class PredicatedTileAccessIterator2dThreadTile;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, AccessType_> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
/// Number of 32b words containing predicates
static int const kPredicateByteCount = (ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kStrided + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
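  // Packing scheme: each access contributes one guard bit; four predicates
  // occupy the low four bits of every byte, so one 32-bit word holds 16
  // predicates. For example, predicate index 21 lives in word 21 / 16 = 1,
  // byte (21 % 16) / 4 = 1, bit (21 % 16) % 4 = 1, i.e. bit (1 * 8 + 1) of
  // predicates_[1].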
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
/// Uses a non-template class
struct Params : PredicatedTileAccessIteratorParams {
public:
friend PredicatedTileAccessIterator2dThreadTile;
using Base = PredicatedTileAccessIteratorParams;
// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) :
Base(layout.stride(0),
MakePredicatedTileAccessIteratorDesc<Shape, Element, Layout, kAdvanceRank, ThreadMap>()()
) { }
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Index of residue tile
int residue_tile_idx_;
/// Used for out-of-order visitation
bool is_residue_tile_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
/// Tracks iterations within the thread loop
int iteration_thread_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_HOST_DEVICE
void compute_predicates_(
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++) {
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous,
ts + s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
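          // In the steady state only the dimension orthogonal to the advance
          // rank still needs a bounds check; the residue tile, visited first,
          // checks both dimensions.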
if (is_steady_state) {
if (kAdvanceRank == 0) {
guard = (coord.strided() < extent_.strided());
} else {
guard = (coord.contiguous() < extent_.contiguous());
}
} else {
guard = (coord.strided() < extent_.strided() &&
coord.contiguous() < extent_.contiguous());
}
int pred_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
}
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residue_tile_(true) {
TensorCoord residue_offset;
if (kAdvanceRank) {
residue_tile_idx_ =
(extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) /
Shape::kStrided;
residue_offset = make_Coord(0, residue_tile_idx_ * Shape::kStrided);
} else {
residue_tile_idx_ =
(extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) /
Shape::kContiguous;
residue_offset = make_Coord(residue_tile_idx_ * Shape::kContiguous, 0);
}
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + residue_offset +
ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(false);
set_iteration_index(0);
}
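  // Worked example (illustrative, with assumed sizes): for kAdvanceRank == 1,
  // Shape::kStrided == 32, a strided extent of 100 and a zero threadblock
  // offset, residue_tile_idx_ = (100 - 1) / 32 = 3, so thread offsets are
  // shifted by 96 and the partial "residue" rows 96..99 are visited first under
  // full predication. The first add_tile_offset() call subtracts that shift and
  // recomputes predicates for the steady-state tiles.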
/// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
int residual = index % (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided);
iteration_strided_ = index / (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided);
iteration_contiguous_ = residual / ThreadMap::ThreadAccessShape::kStrided;
iteration_thread_ = residual % ThreadMap::ThreadAccessShape::kStrided;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += int(sizeof(Element)) * pointer_offset;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
if (is_residue_tile_) {
TensorCoord residue_offset;
if (kAdvanceRank) {
residue_offset = TensorCoord(0, residue_tile_idx_ * Shape::kStrided);
} else {
residue_offset = TensorCoord(residue_tile_idx_ * Shape::kContiguous, 0);
}
thread_offset_ -= residue_offset;
Layout layout(params_.stride_);
add_pointer_offset(-layout(residue_offset));
compute_predicates_(true);
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * (tile_offset.strided() - 1);
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * (tile_offset.contiguous() - 1);
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * tile_offset.strided();
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * tile_offset.contiguous();
pointer_ += Shape::kStrided * tile_offset.strided();
}
}
is_residue_tile_ = false;
}
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *ret_val = reinterpret_cast<AccessType *>(
pointer_ + (iteration_thread_ * params_.stride_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous) * int(sizeof(Element)));
return ret_val;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile &operator++() {
iteration_thread_++;
if (iteration_thread_ < ThreadMap::ThreadAccessShape::kStrided)
return *this;
iteration_thread_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile operator++(int) {
PredicatedTileAccessIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_thread_ +
iteration_contiguous_ * ThreadMap::ThreadAccessShape::kStrided +
iteration_strided_ * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator2dThreadTile for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile operator++(int) {
PredicatedTileAccessIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator2dThreadTile for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::RowMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIterator2dThreadTile operator++(int) {
PredicatedTileAccessIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
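////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage (a minimal sketch; `ThreadMap`, `AccessType`, `layout`,
// `ptr`, `extent` and `tb_offset` are assumed to be supplied by the enclosing
// mainloop, and the tile shape is an arbitrary example):
//
//   using Iterator = PredicatedTileAccessIterator2dThreadTile<
//       layout::PitchLinearShape<128, 32>, int8_t, layout::PitchLinear,
//       /*AdvanceRank=*/1, ThreadMap, AccessType>;
//
//   typename Iterator::Params params(layout);   // host-constructible
//   Iterator iter(params, ptr, extent, threadIdx.x, tb_offset);
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int idx = 0; idx < ThreadMap::Iterations::kCount *
//                           ThreadMap::ThreadAccessShape::kStrided; ++idx) {
//     if (iter.valid()) {
//       AccessType const *src = iter.get();     // guarded global-memory access
//     }
//     ++iter;
//   }
//   iter.add_tile_offset({0, 1});  // advance one tile along the strided rank
//
////////////////////////////////////////////////////////////////////////////////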
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 28,232 | C | 32.811976 | 160 | 0.659039 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates calculating the address and predicates to the load of tiles
from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last
"residue" tile first, with the objective of minimizing predicate mask updates
during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileAccessIteratorTriangularMatrix
///
template <typename Shape, typename Element, typename Layout,
int AdvanceRank, typename ThreadMap,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType>
class PredicatedTileAccessIteratorTriangularMatrix;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, SideMode kSideMode, FillMode kFillMode, DiagType kDiagType, typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, kSideMode, kFillMode, kDiagType, AccessType_> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
using CompareOp = typename TrMatrixCompareOp<kFillMode, kDiagType>::Type;
static_assert( kFillMode == FillMode::kFull ||
((kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) && AccessType::kElements == 1),
"BLAS3 iterator for the triangular/symmetric matrix must use AccessType::kElements as 1");
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
/// Number of 32b words containing predicates
static int const kPredicateByteCount =
(kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIteratorTriangularMatrix;
private:
/// stride of pitch-linear layout (units of Element)
StrideIndex stride_;
/// (true) pitch-linear layout is mapped to row-major matrix
/// (false) pitch-linear layout is mapped to column-major matrix
bool is_row_major_;
/// for vectorized access across the diagonal boundary guard condition is
/// checked for the element on the boundary
int access_diagonal_boundary_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0), is_row_major_(false), access_diagonal_boundary_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout, bool is_row_major, int access_diagonal_boundary) :
stride_(layout.stride(0)), is_row_major_(is_row_major), access_diagonal_boundary_(access_diagonal_boundary) {
inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * sizeof_bits<Element>::value / 8;
}
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) *
ThreadMap::Delta::kStrided * LongIndex(stride_) *
sizeof_bits<Element>::value / 8;
};
};
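  // Worked example (illustrative, with assumed sizes): for Element = float,
  // stride_ = 128, ThreadMap::Delta::kStrided = 8,
  // ThreadMap::Iterations::kStrided = 4, Shape::kStrided = 32 and
  // kAdvanceRank = 1, the precomputed byte increments are
  // inc_strided_ = 128 * 8 * 4 = 4096, inc_advance_ = 32 * 128 * 4 = 16384,
  // and inc_next_ = 16384 - 3 * 8 * 128 * 4 = 4096.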
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Track global memory addresses on the diagonal
/// To ignore imag part for diagonal elements of hermitian matrices
uint32_t predicates_onDiag_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
predicates_onDiag_[i] = 0u;
}
CompareOp compare_op;
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
bool onDiag = false;
guard = ((coord.strided() < extent.strided()) &&
(coord.contiguous() < extent.contiguous()));
      // guard access on the wrong side of the triangular matrix diagonal
      if (kFillMode == FillMode::kLower || kFillMode == FillMode::kUpper) {
        coord += TensorCoord{params_.access_diagonal_boundary_, 0};
        bool triangular_guard_row_major = compare_op(coord.strided(), coord.contiguous()) | !params_.is_row_major_;
        bool triangular_guard_col_major = compare_op(coord.contiguous(), coord.strided()) | params_.is_row_major_;
        guard = guard && triangular_guard_row_major && triangular_guard_col_major;
if (kDiagType == DiagType::kUnit) {
onDiag = (guard && coord.strided() == coord.contiguous()) ? true : false;
}
}
int pred_idx_onDiag = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx_onDiag = pred_idx_onDiag / kPredicatesPerWord;
int residual_onDiag = pred_idx_onDiag % kPredicatesPerWord;
int byte_idx_onDiag = residual_onDiag / kPredicatesPerByte;
int bit_idx_onDiag = residual_onDiag % kPredicatesPerByte;
predicates_onDiag_[word_idx_onDiag] |= (unsigned(onDiag) << (byte_idx_onDiag * 8 + bit_idx_onDiag));
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(const_cast<NonConstPointer>(pointer))),
extent_(extent) {
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(extent_);
set_iteration_index(0);
}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
thread_offset_ += TensorCoord{0, Shape::kStrided * tile_offset.strided()};
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
thread_offset_ += TensorCoord{Shape::kContiguous * tile_offset.contiguous(), 0};
}
compute_predicates_(extent_);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + iteration_vector_;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// Return if the address in on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_onDiag_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
//return true;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, kSideMode, kFillMode, kDiagType,
AccessType_> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
kSideMode, kFillMode, kDiagType, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
static int const kAccessDiagonalBoundary =
(kFillMode == FillMode::kLower) ? (AccessType::kElements - 1) : 0;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
      : params_(layout::PitchLinear(layout.stride(0)), false, kAccessDiagonalBoundary) { }
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Returns whether the current access falls on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
return iterator_.getOnDiag();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorTriangularMatrix for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
typename AccessType_>
class PredicatedTileAccessIteratorTriangularMatrix<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType, AccessType_> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileAccessIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
kSideMode, kFillMode, kDiagType, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
static int const kAccessDiagonalBoundary =
(kFillMode == FillMode::kUpper) ? (AccessType::kElements - 1) : 0;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
      : params_(layout::PitchLinear(layout.stride(0)), true, kAccessDiagonalBoundary) { }
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorTriangularMatrix operator++(int) {
PredicatedTileAccessIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Returns whether the current access falls on the diagonal
CUTLASS_HOST_DEVICE
bool getOnDiag() {
return iterator_.getOnDiag();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
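//
// Illustrative note (not part of the original source): both matrix-layout
// specializations above reduce to the pitch-linear iterator by reinterpreting
// coordinates, as their constructors and add_tile_offset() implementations show.
//
//   // column-major: the row is the contiguous dimension
//   layout::PitchLinearCoord(extent.row(), extent.column());
//   // row-major: the column is the contiguous dimension
//   layout::PitchLinearCoord(extent.column(), extent.row());
//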
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 31,412 | C | 34.176932 | 129 | 0.669426 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates for computing the addresses used to store tiles
       of pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap,
bool Dynamic_iterations = false,
          int Alignment =
              sizeof_bits<Element>::value * ThreadMap::kElementsPerAccess / 8
>
class RegularTileAccessIteratorDirectConv;
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations OFF
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_, false, Alignment> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
  /// Overrides the total number of strided iterations (no-op here: the static
  /// specialization derives its iteration count from ThreadMap at compile time)
  CUTLASS_HOST_DEVICE
  void set_iteration_num(int num) {
    // Do nothing: the iteration count is fixed at compile time
  }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
    // Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided),
    // which means we advance to the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
this->operator++();
return prev;
}
  /// Adds a tile offset in units of whole tiles.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * ThreadMap::Iterations::kStrided *
ThreadMap::Delta::kStrided * stride_ * ThreadMap::kElementsPerAccess);
}
};
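//
// Illustrative usage sketch (not part of the original source): stepping the
// static-iteration specialization above over one tile. `MyThreadMap` and
// `smem_ref` are assumed placeholders for a thread map and a shared-memory
// TensorRef supplied by the surrounding kernel.
//
//   using Iterator = RegularTileAccessIteratorDirectConv<
//       cutlass::layout::PitchLinearShape<64, 8>, cutlass::half_t,
//       cutlass::layout::PitchLinear, 1, MyThreadMap>;
//   Iterator iter(smem_ref, threadIdx.x);
//   CUTLASS_PRAGMA_UNROLL
//   for (int i = 0; i < MyThreadMap::Iterations::kCount; ++i) {
//     Iterator::AccessType *ptr = iter.get();   // address of this access
//     ++iter;                                   // contiguous first, then strided
//   }
//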
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations ON
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_,true, Alignment> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
  /// Total iterations in the strided dimension (dynamic value)
int total_iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
  /// Sets the total number of strided iterations
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
total_iteration_strided_ = num;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
    // Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < total_iteration_strided_) {
return *this;
}
    // Enter here only if (iteration_strided_ == total_iteration_strided_),
    // which means we advance to the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
this->operator++();
return prev;
}
  /// Adds a tile offset in units of whole tiles.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * total_iteration_strided_ * ThreadMap::Delta::kStrided * stride_ *
ThreadMap::kElementsPerAccess);
}
};
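//
// Illustrative sketch (not part of the original source): the dynamic-iteration
// specialization above must be told its strided iteration count before being
// stepped, since total_iteration_strided_ replaces ThreadMap::Iterations::kStrided.
// Here `iter` is an instance of the specialization and `num_strided` is a runtime
// value assumed to come from the convolution problem size.
//
//   iter.set_iteration_num(num_strided);   // total strided iterations for this tile
//   for (int s = 0; s < num_strided * MyThreadMap::Iterations::kContiguous; ++s) {
//     auto *ptr = iter.get();   // address of this access
//     ++iter;                   // iteration indices wrap after the last access
//   }
//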
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_,bool Dynamic_iterations, int Alignment >
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::ColumnMajor,
AdvanceRank, ThreadMap_, Dynamic_iterations , Alignment> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIteratorDirectConv<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_,
Dynamic_iterations>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
  /// Sets the total number of strided iterations
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
iterator_.set_iteration_num(num);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_,bool Dynamic_iterations, int Alignment>
class RegularTileAccessIteratorDirectConv<
Shape_, Element_,
layout::RowMajor,
AdvanceRank, ThreadMap_, Dynamic_iterations, Alignment> {
public:
  static_assert(
      AdvanceRank == 0 || AdvanceRank == 1,
      "Specialization for pitch-linear iterator may only advance along the "
      "contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIteratorDirectConv<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_,
Dynamic_iterations>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
  /// Sets the total number of strided iterations
CUTLASS_HOST_DEVICE
void set_iteration_num(int num) {
iterator_.set_iteration_num(num);
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIteratorDirectConv operator++(int) {
RegularTileAccessIteratorDirectConv prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 18,623 | C | 30.673469 | 106 | 0.66391 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_vector_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates for computing the addresses used to load small
           vectors from global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedVectorAccessIterator
///
template <
/// Shape of the vector accessed by the entire threadblock
typename Shape,
/// Shape of the vector accessed by the warp
typename WarpShape,
/// Type of Element
typename Element,
/// Layout of the vector
typename Layout,
/// Number of elements for each access
int ElementsPerAccess,
/// Support residual tile
bool EnableResidualAccess = false
>
class PredicatedVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Vector access iterator specialized for vectors, e.g. scale and bias
/// Thread arrangements are for TensorOps
///
template <
typename Shape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
bool EnableResidualAccess
>
class PredicatedVectorAccessIterator <
Shape_,
WarpShape_,
Element_,
layout::PitchLinear,
ElementsPerAccess,
EnableResidualAccess
> {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
// static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kThreads = 32;
static int const kRowsPerIteration = 8;
static int const kThreadsPerRow = kThreads / kRowsPerIteration;
static int const kThreadsPerRowMask = 0x3;
static int const kIterations = WarpShape::kContiguous / (kThreadsPerRow * kElementsPerAccess);
static int const kWarpCountStrided = Shape::kStrided / WarpShape::kStrided;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Extent of tensor
TensorCoord extent_;
/// pointer offset of each thread
TensorCoord thread_offset_;
/// iteration index
LongIndex iteration_;
/// residual access
bool is_residual_;
/// residual offset of each thread
TensorCoord residual_offset_;
public:
/// Constructs a vector access iterator
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
/// Pointer to the start of the vector
ConstPointer pointer,
/// Extent of vector
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// ID of each participating warp
int warp_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residual_(false) {
int warp_offset = (warp_id / kWarpCountStrided) * WarpShape::kContiguous;
// Per-thread offset in logical coordinates of tensor
thread_offset_ = threadblock_offset + TensorCoord(warp_offset, 0) +
TensorCoord((thread_id & kThreadsPerRowMask) * kElementsPerAccess, 0);
set_iteration_index(0);
if(EnableResidualAccess) {
// compute residual offset
typename TensorCoord::Index residual_size = extent_.contiguous() % WarpShape::kContiguous;
if (residual_size) {
is_residual_ = true;
residual_offset_ = make_Coord(residual_size, 0);
}
}
}
/// Construct a PredicatedVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
/// Pointer to start of vector
ConstPointer pointer,
/// Extent of vector
TensorCoord extent,
///< ID of each participating thread
int thread_id,
/// ID of each participating warp
int warp_id)
: PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_ = index;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
thread_offset_ =
thread_offset_ +
TensorCoord(WarpShape::kContiguous * tile_offset.contiguous(), 0);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
((thread_offset_.contiguous() + iteration_ * kThreadsPerRow * kElementsPerAccess)
* sizeof_bits<Element>::value / 8));
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator &operator++() {
++iteration_;
if(iteration_ >= kIterations)
iteration_ = 0;
return *this;
}
  /// Advances to the next tile; if residual access is enabled, the residual portion is consumed first.
CUTLASS_HOST_DEVICE
void advance() {
if(EnableResidualAccess && is_residual_) {
is_residual_ = false;
thread_offset_ += residual_offset_;
}
else
add_tile_offset(TensorCoord(1, 0));
}
  /// Post-increment: advances the iterator and returns the prior state.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator operator++(int) {
PredicatedVectorAccessIterator self(*this);
operator++();
return self;
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return ((thread_offset_.contiguous() +
iteration_ * kThreadsPerRow * kElementsPerAccess) < extent_.contiguous());
}
};
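//
// Illustrative usage sketch (not part of the original source): loading a warp's
// slice of a vector with the pitch-linear iterator above. `Shape`, `WarpShape`,
// `vec_ptr`, `extent`, `lane_id`, and `warp_id` are assumed placeholders supplied
// by the surrounding kernel; kIterations, valid(), and advance() come from the
// class itself.
//
//   using Iterator = PredicatedVectorAccessIterator<
//       Shape, WarpShape, cutlass::half_t, cutlass::layout::PitchLinear, 8>;
//   Iterator iter(vec_ptr, extent, lane_id, warp_id);
//   CUTLASS_PRAGMA_UNROLL
//   for (int i = 0; i < Iterator::kIterations; ++i) {
//     Iterator::AccessType frag;
//     if (iter.valid()) {
//       frag = *iter.get();   // guarded vector load
//     }
//     ++iter;                 // next access within the warp tile
//   }
//   iter.advance();           // next tile, or the residual portion if enabled
//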
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedVectorAccessIterator for row-major data.
///
template <
typename Shape_,
typename WarpShape_,
typename Element_,
int ElementsPerAccess,
bool EnableResidualAccess
>
class PredicatedVectorAccessIterator<
Shape_,
WarpShape_,
Element_,
layout::RowMajor,
ElementsPerAccess,
EnableResidualAccess
> {
public:
using Shape = Shape_;
using WarpShape = WarpShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedVectorAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
layout::PitchLinearShape<WarpShape::kColumn, WarpShape::kRow>,
Element,
layout::PitchLinear,
ElementsPerAccess,
EnableResidualAccess>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
static int const kRowsPerIteration = UnderlyingIterator::kRowsPerIteration;
static int const kThreads = UnderlyingIterator::kThreads;
static int const kIterations = UnderlyingIterator::kIterations;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
///< Pointer to the start of the vector
ConstPointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< ID of each participating warp
int warp_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(pointer, layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id, warp_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator(
ConstPointer pointer, ///< Pointer to the start of the vector
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
int warp_id ///< ID of each participating warp
)
: PredicatedVectorAccessIterator(pointer, extent, thread_id, warp_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedVectorAccessIterator operator++(int) {
PredicatedVectorAccessIterator self(*this);
operator++();
return self;
}
  /// Advances to the next tile by delegating to the underlying iterator.
CUTLASS_HOST_DEVICE
void advance() {
iterator_.advance();
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 13,088 | C | 30.313397 | 100 | 0.669927 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates calculating the addresses and predicates used to load scale and bias vectors.
       This iterator uses masks to guard out-of-bounds accesses.
       It can be used to load the gamma and beta vectors of layernorm, which are loop-variant.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorAccessIterator
///
template <typename ThreadblockShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for fprop pitch-linear data.
///
template <typename ThreadblockShape_, typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::PitchLinear> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Internal pointer to first access of tile
BytePointer pointer_;
TensorCoord thread_offset_;
int problem_size_k_;
/// Used for out-of-order visitation
bool is_residue_tile_;
bool guard_;
TensorCoord::Index residue_size_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Extent of tensor
int problem_size_k,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
problem_size_k_ = problem_size_k;
is_residue_tile_ = true;
residue_size_ = (problem_size_k_ - threadblock_offset.contiguous()) % ThreadblockShape::kContiguous;
if (residue_size_ == 0) {
residue_size_ = ThreadblockShape::kContiguous;
}
guard_ = ((thread_id - thread_base) * kElementsPerAccess) < residue_size_;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Extent of tensor
int problem_size_k,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
      /// Pointer to start of bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(problem_size_k,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
guard_ = threadIdx.x < kThreads * 2;
TensorCoord offset = is_residue_tile_ ?
TensorCoord(residue_size_ + ThreadblockShape::kContiguous * (tile_offset.contiguous() - 1), 0)
: TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0);
thread_offset_ =
thread_offset_ +
offset;
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
(thread_offset_.contiguous() * sizeof_bits<Element>::value / 8));
}
  /// Increment operator (no-op; the iterator is advanced via add_tile_offset)
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
return *this;
}
  /// Post-increment: advances the iterator and returns the prior state.
CUTLASS_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
guard_ &= (!enable);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return guard_;
}
};
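//
// Illustrative note (not part of the original source): the iterator above splits
// 2 * kThreads threads across the two vectors. Threads [0, kThreads) read the
// scale vector and threads [kThreads, 2 * kThreads) read the bias vector, each
// loading kElementsPerAccess contiguous elements per access, as the constructor's
// pointer selection and thread_base computation show:
//
//   pointer_    = (thread_id < kThreads) ? scale_pointer : bias_pointer;
//   thread_base = (thread_id < kThreads) ? 0 : kThreads;
//   // element offset of this thread within its vector:
//   //   (thread_id - thread_base) * kElementsPerAccess
//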
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename ThreadblockShape_,
typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::RowMajor> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator<
layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Extent of tensor
int problem_size_k,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(problem_size_k, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
int problem_size_k, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(problem_size_k,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
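//
// Illustrative usage sketch (not part of the original source): constructing the
// row-major specialization above for a layernorm-style gamma/beta load.
// `ThreadblockShape`, `gamma_ptr`, `beta_ptr`, and `problem_size_k` are assumed
// placeholders provided by the surrounding kernel.
//
//   using Iterator = PredicatedScaleBiasVectorAccessIterator<
//       ThreadblockShape, cutlass::half_t, cutlass::layout::RowMajor>;
//   Iterator iter(problem_size_k, gamma_ptr, beta_ptr, threadIdx.x);
//   if (iter.valid()) {
//     Iterator::AccessType frag = *iter.get();   // guarded 128-bit load
//   }
//   iter.add_tile_offset({0, 1});   // advance one threadblock tile along K (contiguous)
//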
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 12,890 | C | 33.284574 | 104 | 0.652444 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_triangular_matrix.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/arch/memory.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileIteratorTriangularMatrix
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//     Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::PredicatedTileIteratorTriangularMatrix;
//
// typename Iterator::Params params(view.layout());
//
//     kernel<Iterator>(params, view.data(), view.extent());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
SideMode kSideMode,
FillMode kFillMode,
DiagType kDiagType,
int AccessSize = ThreadMap::kElementsPerAccess
>
class PredicatedTileIteratorTriangularMatrix;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIteratorTriangularMatrix for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,
SideMode kSideMode, FillMode kFillMode, DiagType kDiagType,
int AccessSize>
class PredicatedTileIteratorTriangularMatrix<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType,
AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
using AccessType = AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
PredicatedTileAccessIteratorTriangularMatrix<Shape, Element, Layout, kAdvanceRank,
ThreadMap, kSideMode, kFillMode, kDiagType, AccessType>;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileIteratorTriangularMatrix;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix operator++(int) {
PredicatedTileIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
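    // Accesses are visited in (strided, contiguous, vector) order. For each access the
    // underlying address iterator supplies a predicated pointer, and the valid() guard
    // passed to cutlass::arch::global_load suppresses any out-of-bounds load.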
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, address_iterator_.valid());
++address_iterator_;
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_byte_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
if (address_iterator_.valid()) {
*access_ptr = frag_ptr[idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_byte_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIteratorTriangularMatrix for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
SideMode kSideMode,
FillMode kFillMode,
DiagType kDiagType,
int AccessSize
>
class PredicatedTileIteratorTriangularMatrix<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType,
AccessSize> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
kSideMode,
kFillMode,
kDiagType,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix operator++(int) {
PredicatedTileIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIteratorTriangularMatrix for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
SideMode kSideMode,
FillMode kFillMode,
DiagType kDiagType,
int AccessSize
>
class PredicatedTileIteratorTriangularMatrix<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_,
kSideMode, kFillMode, kDiagType,
AccessSize> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIteratorTriangularMatrix<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
kSideMode,
kFillMode,
kDiagType,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIteratorTriangularMatrix;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
/// Construct a PredicatedTileIteratorTriangularMatrix with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIteratorTriangularMatrix(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIteratorTriangularMatrix operator++(int) {
PredicatedTileIteratorTriangularMatrix self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 28,064 | C | 33.267399 | 109 | 0.666334 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing computing the addresses of storing of tiles
from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::PitchLinear,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Element type per access
using AccessType = Array<Element, ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: stride_(ref.stride(0) / ThreadMap::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
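    // access_offset is measured in AccessType units: each strided iteration steps by
    // Delta::kStrided rows (stride_ is the row stride expressed in accesses), and each
    // contiguous iteration steps by Delta::kContiguous elements converted to accesses.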
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
    // Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
  /// Adds a tile offset in units of whole tiles.
  /// In the GEMM/Conv implementations, this is used to move along the k dimension in shared memory.
  /// The layouts below are the shared memory layouts. Current SM50 SIMT kernels only use column-major A and row-major B.
  ///   For a row-major A operand, the k dimension is the contiguous dimension;
  ///   For a column-major A operand, the k dimension is the strided dimension;
  ///   For a row-major B operand, the k dimension is the strided dimension;
  ///   For a column-major B operand, the k dimension is the contiguous dimension.
  /// The two classes below map column-/row-major coordinates to the pitch-linear
  /// coordinates used in this base class.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
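    // coord counts whole tiles: contiguous tiles advance by Shape::kContiguous elements,
    // strided tiles advance by Shape::kStrided rows. stride_ is held in AccessType units,
    // hence the ThreadMap::kElementsPerAccess factor to convert back to elements.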
add_pointer_offset(coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ *
ThreadMap::kElementsPerAccess);
}
};
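// Usage sketch (not part of the original header): staging a threadblock tile into
// shared memory with the pitch-linear specialization above. `ThreadMap`, the tile
// shape, and the element type are placeholders for whatever the surrounding kernel
// defines.
//
//   using Iterator = cutlass::transform::threadblock::RegularTileAccessIterator<
//       cutlass::layout::PitchLinearShape<128, 8>, cutlass::half_t,
//       cutlass::layout::PitchLinear, 1, ThreadMap>;
//
//   __device__ void store_tile(typename Iterator::TensorRef smem_ref,
//                              typename Iterator::AccessType const *frag,
//                              int thread_idx) {
//     Iterator iter(smem_ref, thread_idx);
//     CUTLASS_PRAGMA_UNROLL
//     for (int i = 0; i < ThreadMap::Iterations::kCount; ++i) {
//       *iter.get() = frag[i];  // one vectorized AccessType store per iteration
//       ++iter;
//     }
//   }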
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for column major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajor,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::RowMajor,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 13,283 | C | 31.479218 | 115 | 0.661823 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/ell_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief ELL iterator for the matrix of column indices (ellColInd matrix)
*/
#pragma once
namespace cutlass {
namespace transform {
namespace threadblock {
namespace ell{
constexpr unsigned int SmemPow = 8;
constexpr unsigned int SmemStages = 2;
constexpr unsigned int SmemSize = 1 << SmemPow;
constexpr unsigned int SmemMask = (SmemSize*SmemStages-1);
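// With SmemPow = 8, each stage holds 256 column indices and two stages are kept
// resident, so SmemMask (= 511) wraps offsets around the circular double buffer.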
class SharedStorage{
public:
Array<int, SmemSize*SmemStages> array;
};
class Iterator{
public:
using Layout = layout::PitchLinear;
using LongIndex = typename Layout::LongIndex;
private:
const int *gmem_col_idx_;
int *smem_col_idx_;
const int block_size_;
const int base_idx_;
const int k_shape_;
const int ell_increment_;
const int array_length_;
int col_idx_base_;
int residue_;
int counter_;
int pow2_;
int residue_shape_;
int smem_offset_;
int smem_stage_;
int gmem_offset_;
int lane_;
bool is_pow2_;
bool is_residue_tile_;
public:
CUTLASS_DEVICE
void load_ell_indices(){
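    // Cooperatively copy the next stage of ellColInd from global to shared memory.
    // Reads past the end of the index array are clamped to its last entry; indices are
    // rebased by base_idx_ and any negative result is stored as -1.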
for(int i=threadIdx.x; i<SmemSize; i+=blockDim.x){
int idx = (gmem_offset_+i < array_length_) ? gmem_offset_+i : array_length_-1;
int gmem_col_idx = gmem_col_idx_[idx] - base_idx_;
smem_col_idx_[i + smem_stage_ * SmemSize] =
(gmem_col_idx >= 0) ? gmem_col_idx : -1;
}
gmem_offset_ += SmemSize;
smem_stage_ ^= 1;
}
CUTLASS_DEVICE
Iterator(
SharedStorage& shared_storage_base,
const int* col_idx,
const int& block_size,
const int& base_idx,
const int k_shape,
const int& problem_size_k,
const int& ell_stride,
const int& thread_idx)
: residue_(0),
counter_(0),
smem_offset_(0),
smem_stage_(0),
gmem_offset_(0),
block_size_(block_size),
base_idx_(base_idx),
k_shape_(k_shape),
ell_increment_(ell_stride * block_size),
array_length_((problem_size_k + block_size_ - 1) / block_size_),
residue_shape_(problem_size_k % k_shape_),
is_residue_tile_(residue_shape_ != 0),
smem_col_idx_(reinterpret_cast<int*>(&shared_storage_base.array)),
gmem_col_idx_(const_cast<int*>(col_idx)),
lane_(thread_idx % 32) {
load_ell_indices();
__syncthreads();
is_pow2_ = ((block_size_ & (block_size_ - 1)) == 0);
if( is_pow2_ && k_shape <= block_size_ ) lane_ = 0;
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_;
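    // Compute pow2_ = floor(log2(block_size_)); when block_size_ is a power of two,
    // the divisions and modulos in operator++ reduce to shifts and masks.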
pow2_ = 0;
while(block_size_ >> (pow2_ + 1)) ++pow2_;
}
CUTLASS_DEVICE
int get_blocksize(){
return block_size_;
}
CUTLASS_DEVICE
Iterator &operator++(){
if(is_residue_tile_){
residue_ += residue_shape_;
is_residue_tile_ = false;
} else {
residue_ += k_shape_;
}
if(residue_ < block_size_){
return *this;
}
if((array_length_ > SmemSize) && (((smem_offset_ >> SmemPow) & 1) != smem_stage_))
load_ell_indices();
if(residue_ == block_size_){
++smem_offset_;
counter_ += ell_increment_;
residue_ = 0;
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_;
return *this;
}
if(is_pow2_){
smem_offset_ += residue_ >> pow2_;
counter_ += (residue_ >> pow2_) * ell_increment_;
residue_ = residue_ & ((1 << pow2_) - 1);
}
else {
smem_offset_ += residue_ / block_size_;
counter_ += (residue_ / block_size_) * ell_increment_;
residue_ %= block_size_;
}
col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_;
return *this;
}
CUTLASS_DEVICE
LongIndex get_offset(const int& idx) {
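    // Each lane caches the base offset of one upcoming ELL block in col_idx_base_.
    // __shfl_sync broadcasts the value held by the lane whose block contains element idx,
    // and the regular advance of num_jump_tiles blocks is subtracted so the result is an
    // offset relative to the iterator's current position.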
int num_jump_tiles;
if(is_pow2_)
num_jump_tiles = (idx + residue_) >> pow2_;
else
num_jump_tiles = (idx + residue_) / block_size_;
int tmp = __shfl_sync(0xffffffff, col_idx_base_, num_jump_tiles);
return tmp - num_jump_tiles * ell_increment_;
}
CUTLASS_DEVICE
LongIndex get_offset_fast() {
return col_idx_base_;
}
};
}
}
}
}
| 6,181 | C | 29.91 | 101 | 0.589063 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "regular_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int Alignment = sizeof_bits<Element>::value * ThreadMap::kElementsPerAccess / 8
>
class RegularTileIterator2dThreadTile;
/// Regular tile iterator specialized for pitch-linear + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the contiguous or strided dimensions.");
private:
//
// Types
//
using AccessType = AlignedArray<Element, ThreadMap::ThreadAccessShape::kCount, kAlignment>;
//
// Data members
//
/// Pointer to memory
uint8_t *pointer_;
/// Stride quantity
StrideIndex stride_;
/// Amount to increment pointer along strided dimension
LongIndex increment_strided_;
/// Amount to advance pointer between tiles
LongIndex increment_advance_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx,
int interleave
){
TensorCoord t = ThreadMap::initial_offset(thread_idx);
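    // Remap the thread's initial offset for the interleaved view: the contiguous
    // coordinate is scaled by the interleave factor while the strided coordinate and the
    // tensor stride are divided by it; the increments below are then expressed in bytes.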
long int offset = t[0] * interleave + t[1] * ref.stride()[0]/interleave;
pointer_ = reinterpret_cast<uint8_t *>(ref.data() + offset);
stride_ = ref.stride()[0] / interleave;
increment_strided_ = (ref.stride()[0] * sizeof_bits<Element>::value / 8) * ThreadMap::Delta::kStrided / interleave;
increment_advance_ =
(kAdvanceRank == 0 ?
Shape::kContiguous * sizeof_bits<Element>::value / 8 :
Shape::kStrided * (ref.stride()[0] * sizeof_bits<Element>::value / 8) / interleave);
}
/// Loads a fragment
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
load_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag);
uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int idx = c + s * ThreadMap::Iterations::kContiguous;
access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided] = frag_ptr[idx];
}
if (s + 1 < ThreadMap::Iterations::kStrided) {
byte_pointer += increment_strided_;
}
}
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
store_with_pointer_offset(
frag,
tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_
);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
pointer_ += increment_advance_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
pointer_ -= increment_advance_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
int offset = sizeof_bits<Element>::value *
(coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8;
add_pointer_offset(offset);
}
};
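// Usage sketch (not part of the original header): moving one tile through registers with
// the pitch-linear specialization above. `ThreadMap2dTile` and the shapes are placeholders
// for whatever the surrounding kernel defines; the interleave factor of 4 matches the
// interleaved specializations below.
//
//   using Iterator = cutlass::transform::threadblock::RegularTileIterator2dThreadTile<
//       cutlass::layout::PitchLinearShape<64, 32>, int8_t,
//       cutlass::layout::PitchLinear, 1, ThreadMap2dTile>;
//
//   __device__ void copy_tile(typename Iterator::TensorRef src,
//                             typename Iterator::TensorRef dst, int thread_idx) {
//     Iterator src_it(src, thread_idx, /*interleave=*/4);
//     Iterator dst_it(dst, thread_idx, /*interleave=*/4);
//     typename Iterator::Fragment frag;
//     src_it.load(frag);
//     dst_it.store(frag);
//   }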
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::RowMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorInterleaved<4>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
using Underlying = RegularTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
kAlignment
>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile() { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx, 4) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
    iterator_.load(frag, {tile_offset.column(), tile_offset.row()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
    iterator_.store(frag, {tile_offset.column(), tile_offset.row()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorInterleaved<4>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
using PitchLinearThreadMap = PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
ThreadMap::kThreads, ThreadMap::ThreadAccessShape::kCount >;
using Underlying = RegularTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap
>;
static_assert(kAdvanceRank == 0 || kAdvanceRank == 1,
"Advance rank may only be along the row or column dimensions.");
private:
Underlying iterator_;
public:
CUTLASS_DEVICE
RegularTileIterator2dThreadTile() { }
CUTLASS_DEVICE
RegularTileIterator2dThreadTile(
TensorRef const &ref,
int thread_idx
):
iterator_({ref.data(), ref.stride()}, thread_idx, 4) {
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag, TensorCoord const & tile_offset) {
    iterator_.load(frag, {tile_offset.row(), tile_offset.column()});
}
/// Loads a fragment
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
iterator_.load_with_pointer_offset(frag, 0);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, TensorCoord const & tile_offset) {
    iterator_.store(frag, {tile_offset.row(), tile_offset.column()});
}
/// Stores a fragment
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
iterator_.store_with_pointer_offset(frag, 0);
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances the pointer
CUTLASS_HOST_DEVICE
RegularTileIterator2dThreadTile &operator--() {
--iterator_;
return *this;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 15,486 | C | 29.366667 | 128 | 0.671251 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing computing the addresses of storing of tiles
from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
    /// This iterator is specialized for an access size that is 64 bits in
    /// length.
static int const kAccessSizeInBits = 64;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 64b");
///< Number of pointers
static int const kPointerCount = 1;
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
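    // The offset below is in units of AccessType: each strided iteration step advances
    // ThreadMap::Delta::kStrided rows of stride_ accesses, and each contiguous iteration
    // step advances ThreadMap::Delta::kContiguous elements (i.e. Delta::kContiguous /
    // kElementsPerAccess accesses) within the row.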
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
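    // The pointer offset is expressed in units of Element: stride_ is stored in units of
    // AccessType (see the constructor), so the strided term multiplies back by
    // Layout::kElementsPerAccess.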
add_pointer_offset(
coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous64b,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous64b,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
    /// This iterator is specialized for an access size that is 64 bits in
    /// length.
static int const kAccessSizeInBits = 64;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 64b");
///< Number of pointers - two pointers are needed if making more than 4 iterations along
///< strided dimension
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 4 ? 2 : 1);
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_[Detail::kPointerCount];
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / ThreadMap::kElementsPerAccess) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data());
byte_offset_[0] = ref.offset(thread_offset_in_threadblock_tile) * sizeof(Element);
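    // When a second pointer is needed (more than 4 strided iterations), its byte offset
    // is the first offset with the 8-byte (one 64b access) position within a 16-byte
    // group toggled; get() selects between the two using bit 2 of the strided iteration.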
if (Detail::kPointerCount == 2) {
byte_offset_[1] = byte_offset_[0] ^ 8;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset / ThreadMap::kElementsPerAccess;
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
// Map the logical contiguous and strided access to the internal swizzled structure.
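    // In units of AccessType: the low two bits of the strided iteration select a row
    // (stride_ accesses per row), every eight strided iterations add 16 accesses within
    // the row, and the contiguous iteration advances by Delta::kContiguous whole rows.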
int uniform_offset = (iteration_strided_ & 0x3) * stride_ + (iteration_strided_ >> 3) * 16 + stride_ * ThreadMap::Delta::kContiguous * iteration_contiguous_;
char *access_byte_ptr = reinterpret_cast<char *>(pointer_ + uniform_offset);
int byte_offset;
    // Two byte offsets are needed when the thread map makes more than 4 iterations
    // along the strided dimension; the second offset covers strided iterations 4-7.
if (Detail::kPointerCount == 2 && (iteration_strided_ & 0x4)) {
byte_offset = byte_offset_[1];
}
else {
byte_offset = byte_offset_[0];
}
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.strided() * Shape::kStrided + coord.contiguous() * Shape::kContiguous * stride_);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicand64bCrosswise,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicand64bCrosswise,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128b");
///< Number of pointers
static int const kPointerCount = 1;
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous128b,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous128b,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128b");
///< Number of pointers
static int const kPointerCount = 1;
};
  static_assert(!(ThreadMap::Iterations::kStrided % 2), "This iterator requires an even number of iterations along the strided dimension");
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
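    // In units of AccessType: offset_c counts rows (the contiguous iteration scaled by
    // Delta::kContiguous, plus two extra rows on odd strided iterations), while offset_s
    // advances eight accesses within the row for every pair of strided iterations.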
int offset_c = (iteration_contiguous_ * ThreadMap::Delta::kContiguous + (iteration_strided_ & 1) * 2);
int offset_s = (iteration_strided_ / 2) * 8;
int access_offset = offset_c * stride_ + offset_s;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous * stride_ +
coord.strided() * Shape::kStrided * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCrosswise128x4,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCrosswise128x4,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 47,789 | C | 30.174168 | 161 | 0.668627 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief ELL iterator for the Blocked-ELL matrix (the ellValue matrix) used with EllMmaMultistage
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// EllPredicatedTileAccessIterator
///
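/// Illustrative usage (a sketch only; `Shape`, `Element`, `ThreadMap`, `AccessType`,
/// and the runtime arguments are placeholders, not values taken from this file):
///
///   using Iterator = EllPredicatedTileAccessIterator<
///       Shape, Element, layout::PitchLinear, 1, ThreadMap, AccessType>;
///
///   typename Iterator::Params params(layout);    // host-constructible, precomputed state
///   Iterator iter(params, pointer, extent, thread_id, threadblock_offset);
///   iter.ell_add_mask(ell_blocksize);             // restrict accesses to the ELL block
///   if (iter.valid()) { /* access *iter.get() */ }
///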
template <typename Shape, typename Element, typename Layout, int AdvanceRank,
typename ThreadMap, typename AccessType>
class EllPredicatedTileAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for pitch-linear data.
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::PitchLinear,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");
static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;
static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector;
/// Number of 32b words containing predicates
static int const kPredicateByteCount =
(kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte;
static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4;
static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u;
static_assert(kPredicateWordCount <= 4, "Too many predicates.");
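  // Worked example (illustrative): with 32 predicated accesses per tile,
  // kPredicateByteCount = (32 + 3) / 4 = 8 and kPredicateWordCount = (8 + 3) / 4 = 2.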
/// Predicate vector stores mask to guard accesses
using Mask = Array<uint32_t, kPredicateWordCount>;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend EllPredicatedTileAccessIterator;
private:
/// stride of pitch-linear layout (units of Element)
LongIndex stride_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
public:
// Default ctor
CUTLASS_HOST_DEVICE
Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : stride_(layout.stride(0)) {
inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;
if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * sizeof_bits<Element>::value / 8;
}
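      // inc_next_ steps from the last strided access of one tile to the first access of
      // the next tile: a full tile advance minus the strided increments already applied
      // while iterating within the tile.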
inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) *
ThreadMap::Delta::kStrided * LongIndex(stride_) *
sizeof_bits<Element>::value / 8;
};
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Parameters object with precomputed internal state
Params const ¶ms_;
/// Internal pointer to first access of tile
BytePointer pointer_;
/// Guard predicates
uint32_t predicates_[kPredicateWordCount];
/// Size of tensor
TensorCoord extent_;
/// Initial offset for each thread
TensorCoord thread_offset_;
/// Offset to the first steady-state tile
TensorCoord residue_offset_;
/// Initial offset to define ELL block
TensorCoord ell_offset_;
/// Used for out-of-order visitation
bool is_residue_tile_;
/// Iteration along vectors implied by the thread map
int iteration_vector_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0u;
}
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = thread_offset_ + iteration_coord;
bool guard;
if (is_steady_state) {
if (kAdvanceRank == 0) {
guard = (coord.strided() < extent.strided());
} else {
guard = (coord.contiguous() < extent.contiguous());
}
} else {
guard = (coord.strided() < extent.strided() &&
coord.contiguous() < extent.contiguous());
}
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
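      // Predicates are packed four per byte, in the low 4 bits of each byte, so each 32b
      // predicate word holds kPredicatesPerWord = 16 guard bits.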
predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
}
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(pointer))),
extent_(extent),
is_residue_tile_(true) {
TensorCoord residue_extent;
if (kAdvanceRank) {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided;
if (!residue_size) {
residue_size = Shape::kStrided;
}
residue_offset_ = make_Coord(0, residue_size);
residue_extent = make_Coord(
extent_.contiguous(),
min(threadblock_offset.strided() + residue_size, extent_.strided())
);
} else {
typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous;
if (!residue_size) {
residue_size = Shape::kContiguous;
}
residue_offset_ = make_Coord(residue_size, 0);
residue_extent = make_Coord(
min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size),
extent_.strided()
);
}
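    // The residue covers the leftover (extent % Shape) portion along the advance
    // dimension so that every tile visited after the first add_tile_offset() is a full
    // ("steady state") tile.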
// Per-thread offset in logical coordinates of tensor
ell_offset_ = ThreadMap::initial_offset(thread_id);
thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id);
// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(thread_offset_));
compute_predicates_(residue_extent, false);
set_iteration_index(0);
}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_vector_ = index % kAccessesPerVector;
int residual_access = index / kAccessesPerVector;
iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous;
iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
if (is_residue_tile_) {
thread_offset_ += residue_offset_;
Layout layout(params_.stride_);
add_pointer_offset(layout(residue_offset_));
compute_predicates_(extent_, true);
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1);
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1);
pointer_ += Shape::kStrided * tile_offset.strided();
}
} else {
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
pointer_ += Shape::kContiguous * tile_offset.contiguous();
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
pointer_ += Shape::kStrided * tile_offset.strided();
}
}
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + iteration_vector_;
}
  /// Returns the k coordinate: the thread's current position along the advance dimension
CUTLASS_HOST_DEVICE
int get_k() const {
    if (kAdvanceRank) {  // strided
      return ell_offset_.strided() + iteration_strided_ * ThreadMap::Delta::kStrided;
    } else {
      return ell_offset_.contiguous() + iteration_contiguous_ * ThreadMap::Delta::kContiguous + iteration_vector_ * AccessType::kElements;
    }
}
CUTLASS_HOST_DEVICE
int get_stride() const {
    if (kAdvanceRank)
return params_.stride_;
else
return 1;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iteration_vector_;
if (iteration_vector_ < kAccessesPerVector) {
return *this;
}
iteration_vector_ = 0;
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
// Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_strided_;
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
// advance to next tile
pointer_ += params_.inc_next_;
// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;
return *this;
}
/// Increment and return an instance to self.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = enable ? 0u : predicates_[i];
}
}
  /// Enables all predicates efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = 0xffffffff;
}
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
predicates_[i] = mask[i];
}
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = predicates_[i];
}
}
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
Mask mask;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] = 0u;
}
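    // Build a mask that is set only for accesses whose coordinate along the non-advance
    // dimension falls inside the ELL block of extent `blocksize`, then AND it into the
    // existing predicates.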
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) {
int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector);
int c = access_residual / kAccessesPerVector;
int v = access_residual % kAccessesPerVector;
TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements,
s * ThreadMap::Delta::kStrided);
TensorCoord coord = ell_offset_ + iteration_coord;
bool guard;
if (kAdvanceRank == 0) {
guard = (coord.strided() < blocksize);
} else {
guard = (coord.contiguous() < blocksize);
}
int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
mask[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kPredicateWordCount; ++i) {
mask[i] &= predicates_[i];
}
set_mask(mask);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
int pred_idx =
iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous);
int word_idx = pred_idx / kPredicatesPerWord;
int residual = pred_idx % kPredicatesPerWord;
int byte_idx = residual / kPredicatesPerByte;
int bit_idx = residual % kPredicatesPerByte;
bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
return pred;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(),
threadblock_offset.column())) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_>
class EllPredicatedTileAccessIterator<Shape_, Element_, layout::RowMajor,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))){};
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
///< Precomputed parameters object
Params const ¶ms,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for column-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class EllPredicatedTileAccessIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileAccessIterator for row-major interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, typename AccessType_, int InterleavedK>
class EllPredicatedTileAccessIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessType_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap,
AccessType>;
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileAccessIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
/// Construct a EllPredicatedTileAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileAccessIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
CUTLASS_HOST_DEVICE
int get_k() const {
return iterator_.get_k();
}
CUTLASS_HOST_DEVICE
int get_stride() const {
return iterator_.get_stride();
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileAccessIterator operator++(int) {
EllPredicatedTileAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 44,443 | C | 31.897113 | 138 | 0.658776 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates implementing the computation of addresses for storing
      small scale and bias vectors in shared memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// RegularScaleBiasVectorAccessIterator
///
template <typename Shape, typename Element, typename Layout>
class RegularScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_>
class RegularScaleBiasVectorAccessIterator<Shape_, Element_, layout::PitchLinear> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
/// Element type per access
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = Shape::kContiguous / kElementsPerAccess;
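  // For example, a 16-bit element type gives kElementsPerAccess = 128 / 16 = 8,
  // so each access is one 128-bit vector and kThreads is the number of threads
  // needed to cover the contiguous extent of the tile.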
using AccessType = Array<Element, kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator(
TensorRef scale_bias_ref, ///< Pointer to the start of the scale and bias
///< vector
int thread_id ///< ID of each participating thread
)
: byte_offset_(0) {
// Per-thread offset in logical coordinates of tensor
int thread_offset = thread_id * kElementsPerAccess;
// initialize pointer
pointer_ =
reinterpret_cast<AccessType *>(scale_bias_ref.data() + thread_offset);
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
char *access_byte_ptr =
reinterpret_cast<char *>(pointer_);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator &operator++() { return *this; }
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator operator++(int) {
RegularScaleBiasVectorAccessIterator prev(*this);
this->operator++();
return prev;
}
  /// Adds a tile offset in units of whole tiles.
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
    // Multiply by 2 because the scale and bias vectors that belong to the same
    // stage are stored next to each other.
add_pointer_offset(coord.contiguous() * Shape::kContiguous * 2);
}
};
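// Example usage (illustrative sketch only; the shape, element type, and
// variable names below are assumptions for illustration, not requirements
// of the library):
//
//   using Iterator = cutlass::transform::threadblock::
//       RegularScaleBiasVectorAccessIterator<
//           cutlass::layout::PitchLinearShape<128, 1>,   // Shape
//           cutlass::half_t,                             // Element
//           cutlass::layout::PitchLinear>;               // Layout
//
//   Iterator iter(scale_bias_ref, threadIdx.x);  // scale_bias_ref: TensorRef
//   typename Iterator::AccessType *access = iter.get();
//   iter.add_tile_offset({1, 0});  // step to the next stage's scale/bias pair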
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for row major layouts
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_>
class RegularScaleBiasVectorAccessIterator<
Shape_, Element_,
layout::RowMajor> {
public:
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
/// Underlying iterator type
using UnderlyingIterator = RegularScaleBiasVectorAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator(
TensorRef scale_bias_ref, ///< Pointer to the start of the scale and bias
///< vector
int thread_id ///< ID of each participating thread
)
: iterator_({scale_bias_ref.data(), scale_bias_ref.stride()}, thread_id) {
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularScaleBiasVectorAccessIterator operator++(int) {
RegularScaleBiasVectorAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 8,232 | C | 31.413386 | 100 | 0.657191 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h"
#include "cutlass/transform/thread/transpose.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileIterator2dThreadTile
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//   Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::PredicatedTileIterator2dThreadTile;
//
// typename Iterator::Params params(view.layout());
//
//   kernel<Iterator>(params, view.data(), view.extent());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
bool Transpose = false
>
class PredicatedTileIterator2dThreadTile;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, bool Transpose_>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
  /// extra set of parentheses is needed for VS compiler
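  /// e.g. four 32-bit elements per access yields a 16-byte-aligned AccessType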
struct alignas((ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value /
8)) AccessType {
Array<Element, ThreadMap::kElementsPerAccess> storage;
static int const kElements = ThreadMap::kElementsPerAccess;
};
  /// Optionally this fragment can be 4x4 transposed
  using Transform = thread::Transpose<ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount, layout::PitchLinearShape<4, 4>, Element>;
static bool const transpose = Transpose_;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
PredicatedTileAccessIterator2dThreadTile<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType>;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
using Base = typename TileAccessIterator::Params::Base;
friend PredicatedTileIterator2dThreadTile;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Base const &base)
: params_(base) {}
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){
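          // Flatten (strided iteration, contiguous iteration, element within
          // the thread tile) into the linear index used to address both the
          // fragment and the access iterator's predicate state.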
int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \
s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
address_iterator_.set_iteration_index(access_idx);
if (address_iterator_.valid()) {
frag_ptr[access_idx] =
*(address_iterator_.get() + pointer_offset);
}
++address_iterator_;
}
}
}
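    // Optionally transpose the fragment in registers (as 4x4 element tiles)
    // so it matches the layout expected by the consuming operation.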
if (transpose) {
Transform t;
t.transform(frag, frag);
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){
int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \
s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
address_iterator_.set_iteration_index(access_idx);
if (address_iterator_.valid()) {
*(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
bool Transpose_
>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static bool const Transpose = Transpose_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
Transpose
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
bool Transpose_
>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static bool const Transpose = Transpose_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
Transpose
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { }
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 27,175 | C | 33.48731 | 150 | 0.670948 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm70.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in length.
static int const kAccessSizeInBits = 128;
static_assert(
sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
///< Number of pointers
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType * pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific pointer
// (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base + layout::PitchLinearCoord{0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
add_pointer_offset((kAdvanceRank ? Shape::kStrided * stride_ * Layout::kElementsPerAccess : Shape::kContiguous));
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess
);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[s & 1];
int stride_idx = (s & ~1);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess +
vec_pointer_offset;
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char const *access_byte_ptr = reinterpret_cast<char const *>(access_ptr + access_offset);
frag_ptr[access_idx] = *reinterpret_cast<AccessType const *>(access_byte_ptr + byte_offset_);
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[s & 1];
int stride_idx = (s & ~1);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess +
vec_pointer_offset;
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset);
*reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_) = frag_ptr[access_idx];
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
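//
// Usage sketch (illustrative only): this specialization is normally instantiated
// against a pitch-linear thread map whose access size works out to 128 bits
// (e.g. 8 x half_t per access). `ExampleThreadMap` below is a hypothetical
// placeholder, not a type defined in this file.
//
//   using Shape    = cutlass::layout::PitchLinearShape<64, 8>;
//   using Element  = cutlass::half_t;
//   using Layout   = cutlass::layout::VoltaTensorOpMultiplicandCongruous<16>;
//   using Iterator = cutlass::transform::threadblock::RegularTileIterator<
//       Shape, Element, Layout, /*AdvanceRank=*/1, ExampleThreadMap>;
//
//   Iterator iter(ref, thread_id);        // ref is a TensorRef<Element, Layout>
//   typename Iterator::Fragment frag;
//   iter.load(frag);                      // read one tile into registers
//   ++iter;                               // advance along the strided dimension
//   iter.store(frag);                     // write a tile back to shared memory
//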
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::RowMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in length.
static int const kAccessSizeInBits = 128;
static_assert(
sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
///< Number of pointers
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType * pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific pointer
// (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base + layout::PitchLinearCoord{0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
add_pointer_offset((kAdvanceRank ? Shape::kStrided * stride_ * Layout::kElementsPerAccess : Shape::kContiguous));
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess
);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[s & 1];
int stride_idx = (s & ~1);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess +
vec_pointer_offset;
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char const *access_byte_ptr = reinterpret_cast<char const *>(access_ptr + access_offset);
frag_ptr[access_idx] = *reinterpret_cast<AccessType const *>(access_byte_ptr + byte_offset_);
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
Index vec_pointer_offset = pointer_offset / ThreadMap::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[s & 1];
int stride_idx = (s & ~1);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ +
c * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess +
vec_pointer_offset;
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset);
*reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_) = frag_ptr[access_idx];
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::ColumnMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_,
Element_,
layout::RowMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
AdvanceRank,
ThreadMap_,
Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
/// Tile iterator specialized for crosswise arrangements for TensorOps.
///
/// Volta TN SMEM layout is a little different:
/// crosswise elements are stored within a line, while contiguous elements
/// are stored line-by-line.
/// Padding is used to reduce SMEM bank conflicts.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
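/// Worked note (illustrative): the constructor of this specialization computes
///   line_size = ref.stride(0) * Detail::kContiguousElementsPerLine / Layout::kElementsPerAccess
/// i.e. the length of one crosswise line, including any padding carried by the
/// tensor's stride, expressed in units of AccessType. For example, assuming a
/// stride of 72 elements, 4 contiguous elements per line, and 8 elements per
/// access, line_size would be 72 * 4 / 8 = 36 AccessType units; the numbers are
/// placeholders, only the formula comes from the constructor below.
///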
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<
Shape_, Element_,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Shape_::kContiguous>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Shape::kContiguous>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
///< Number of pointers
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 2 : 1);
/// Iterations for the kElementsPerAccess of ThreadMap
static int const kIterarionsPerAccess =
ThreadMap::kElementsPerAccess / Layout::kElementsPerAccess;
/// Contiguous elements per line
static int const kContiguousElementsPerLine = 4;
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment =
Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
private:
//
// Data members
//
  /// The crosswise elements are stored in a line.
  /// line_size is the size of the crosswise dimension plus padding,
  /// in units of AccessType.
Index line_size;
/// Internal pointer to first access of tile
AccessType *pointer_[Detail::kPointerCount];
/// Internal byte offset
Index byte_offset_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: line_size(ref.stride(0) * Detail::kContiguousElementsPerLine / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base =
ThreadMap::initial_offset(thread_id);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Detail::kPointerCount; ++i) {
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile =
thread_offset_base +
layout::PitchLinearCoord{
0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i};
// initialize pointer
pointer_[i] = reinterpret_cast<AccessType *>(
ref.data() + ref.offset(thread_offset_in_threadblock_tile));
}
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
// (Shape::kContiguous/Layout::kElementsPerAccess)*
// line_size * Layout::kElementsPerAccess
add_pointer_offset(Shape::kContiguous * line_size);
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset((coord.contiguous() * (Shape::kContiguous / Layout::kElementsPerAccess) *
line_size + coord.strided() * Shape::kStrided) *
Layout::kElementsPerAccess);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
Index vec_pointer_offset = pointer_offset / Layout::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[(s & 1) ^ (s / 2)];
access_ptr += 16 * (s / 2);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < Detail::kIterarionsPerAccess; ++i) {
int access_offset =
c * ThreadMap::Delta::kContiguous / Detail::kContiguousElementsPerLine * line_size +
vec_pointer_offset + i * line_size;
int access_idx = (c + s * ThreadMap::Iterations::kContiguous) *
Detail::kIterarionsPerAccess + i;
char const *access_byte_ptr = reinterpret_cast<char const*>(access_ptr + access_offset);
frag_ptr[access_idx] = *reinterpret_cast<AccessType const *>(
access_byte_ptr + byte_offset_);
}
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
Index vec_pointer_offset = pointer_offset / Layout::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
AccessType *access_ptr = pointer_[(s & 1) ^ ((s >> 1) & 1)];
access_ptr += 16 * (s / 2) + vec_pointer_offset;
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < Detail::kIterarionsPerAccess; ++i) {
int access_offset =
c * ThreadMap::Delta::kContiguous / Detail::kContiguousElementsPerLine * line_size + i * line_size;
int access_idx = (c + s * ThreadMap::Iterations::kContiguous) *
Detail::kIterarionsPerAccess + i;
char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset);
*reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_) =
frag_ptr[access_idx];
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_,
layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Shape_::kRow>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Shape::kRow>;
static int const kAdvanceRank = AdvanceRank;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Shape::kRow>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int Alignment
>
class RegularTileIterator<Shape_, Element_,
layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Shape_::kColumn>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Shape::kColumn>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Shape::kColumn>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 43,663 | C | 28.886379 | 117 | 0.665048 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/vector_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Template wraps the vector access iterator concept to load a whole vector from tensors in
memory. This is typically used for per-channel scale and bias in convolution kernels.
*/
#pragma once
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename VectorAccessIterator_>
class VectorIterator {
public:
using VectorAccessIterator = VectorAccessIterator_;
using Shape = typename VectorAccessIterator::Shape;
using Element = typename VectorAccessIterator::Element;
using Layout = typename VectorAccessIterator::Layout;
using TensorCoord = typename Layout::TensorCoord;
using AccessType = typename VectorAccessIterator::AccessType;
using TensorRef = typename VectorAccessIterator::TensorRef;
using Index = typename VectorAccessIterator::Index;
using LongIndex = typename VectorAccessIterator::LongIndex;
static int const kElementsPerAccess = VectorAccessIterator::kElementsPerAccess;
static int const kRowsPerIteration = VectorAccessIterator::kRowsPerIteration;
static int const kThreads = VectorAccessIterator::kThreads;
static int const kIterations = VectorAccessIterator::kIterations;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<
Element, kElementsPerAccess * kIterations>;
private:
/// Internal state
VectorAccessIterator vector_access_iterator_;
public:
/// Constructor
CUTLASS_HOST_DEVICE
VectorIterator(
Element const *ptr,
TensorCoord extent,
int thread_idx,
int warp_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
vector_access_iterator_(ptr, extent, thread_idx, warp_idx, threadblock_offset) { }
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
VectorIterator &operator++() {
vector_access_iterator_.advance();
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
VectorIterator operator++(int) {
VectorIterator self(*this);
operator++();
return self;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
frag.clear();
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < kIterations; ++c) {
cutlass::arch::global_load<
AccessType,
sizeof(AccessType)
>(
frag_ptr[c],
vector_access_iterator_.get() + pointer_offset,
vector_access_iterator_.valid()
);
++vector_access_iterator_;
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
vector_access_iterator_.set_iteration_index(0);
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void advance() {
vector_access_iterator_.advance();
}
};
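//
// Usage sketch (illustrative): `ScaleBiasAccessIterator` stands in for a concrete
// PredicatedVectorAccessIterator instantiation and is not a name defined here.
//
//   using ScaleBiasIterator =
//       cutlass::transform::threadblock::VectorIterator<ScaleBiasAccessIterator>;
//
//   ScaleBiasIterator it(scale_bias_ptr, extent, thread_idx, warp_idx);
//   typename ScaleBiasIterator::Fragment frag;
//   it.load(frag);    // performs kIterations predicated global loads into frag
//   it.advance();     // step the underlying access iterator to the next rows
//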
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 5,226 | C | 33.846666 | 100 | 0.649445 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_params.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Predicated tile access iterator descriptor object containing template dependent state
struct PredicatedTileAccessIteratorDesc {
int element_size_bits;
int advance_rank;
layout::PitchLinearCoord threadblock_shape;
layout::PitchLinearCoord threadmap_iterations;
layout::PitchLinearCoord threadmap_delta;
//
// Methods
//
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc() { }
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc(
int element_size_bits_,
int advance_rank_,
layout::PitchLinearCoord threadblock_shape_,
layout::PitchLinearCoord threadmap_iterations_,
layout::PitchLinearCoord threadmap_delta_
):
element_size_bits(element_size_bits_),
advance_rank(advance_rank_),
threadblock_shape(threadblock_shape_),
threadmap_iterations(threadmap_iterations_),
threadmap_delta(threadmap_delta_)
{
#if 0
printf("PredicatedTileAccessIteratorDesc(%d, %d, {%d, %d}, {%d, %d}, {%d, %d}})\n",
element_size_bits,
advance_rank,
threadblock_shape.contiguous(), threadblock_shape.strided(),
threadmap_iterations.contiguous(), threadmap_iterations.strided(),
threadmap_delta.contiguous(), threadmap_delta.strided());
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper template to construct a PredicatedTileAccessIteratorDesc from template-
/// dependent state
template <
typename Shape, typename Element, typename Layout,
int AdvanceRank, typename ThreadMap>
struct MakePredicatedTileAccessIteratorDesc;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for pitch-linear data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap> {
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return PredicatedTileAccessIteratorDesc(
sizeof_bits<Element>::value,
AdvanceRank,
{Shape::kContiguous, Shape::kStrided},
{ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided},
{ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}
);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for column-major data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::ColumnMajor, AdvanceRank, ThreadMap> {
static int const kAdvanceRank = AdvanceRank;
using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap>;
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return UnderlyingMakeOperator()();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for row-major data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::RowMajor, AdvanceRank, ThreadMap> {
static int const kAdvanceRank = AdvanceRank;
using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap>;
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return UnderlyingMakeOperator()();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for column-major interleaved data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap, int InterleavedK>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::ColumnMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap> {
static int const kAdvanceRank = AdvanceRank;
static int const kInterleavedK = InterleavedK;
using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap>;
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return UnderlyingMakeOperator()();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization of MakePredicatedTileAccessIteratorDesc for row-major interleaved data.
template <
typename Shape, typename Element, int AdvanceRank,
typename ThreadMap, int InterleavedK>
struct MakePredicatedTileAccessIteratorDesc <
Shape, Element, layout::RowMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap> {
static int const kAdvanceRank = AdvanceRank;
static int const kInterleavedK = InterleavedK;
using UnderlyingMakeOperator = MakePredicatedTileAccessIteratorDesc<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>, Element,
layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap>;
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorDesc operator()() {
return UnderlyingMakeOperator()();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Parameters struct
//
struct PredicatedTileAccessIteratorParams {
using Index = int32_t;
using LongIndex = int64_t;
//
// Data members
//
/// stride of pitch-linear layout (units of Element)
LongIndex stride_;
/// amount (in byte) to increment pointer to move to next access along
/// strided dimension
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;
//
// Methods
//
CUTLASS_HOST_DEVICE
Status initialize(LongIndex stride, PredicatedTileAccessIteratorDesc desc) {
stride_ = stride;
inc_strided_ = (LongIndex(stride_) * desc.threadmap_delta.strided()) *
desc.element_size_bits / 8;
if (desc.advance_rank) {
// advance along strided dimension
inc_advance_ =
desc.threadblock_shape.strided() * LongIndex(stride_) * desc.element_size_bits / 8;
} else {
// advance along contiguous dimension
inc_advance_ = desc.threadblock_shape.contiguous() * desc.element_size_bits / 8;
}
inc_next_ = inc_advance_ - LongIndex(desc.threadmap_iterations.strided() - 1) *
desc.threadmap_delta.strided() * LongIndex(stride_) *
desc.element_size_bits / 8;
return Status::kSuccess;
}
CUTLASS_HOST_DEVICE
Status initialize(Index stride, PredicatedTileAccessIteratorDesc desc) {
return initialize(LongIndex(stride), desc);
}
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorParams() {
initialize(LongIndex(0), PredicatedTileAccessIteratorDesc());
}
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorParams(Index stride, PredicatedTileAccessIteratorDesc desc) {
initialize(stride, desc);
}
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorParams(LongIndex stride, PredicatedTileAccessIteratorDesc desc) {
initialize(stride, desc);
}
};
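//
// Worked example (illustrative numbers only): for a pitch-linear operand with
// stride = 128 elements, element_size_bits = 16, threadmap_delta.strided() = 8,
// threadmap_iterations.strided() = 4, threadblock_shape.strided() = 32 and
// advance_rank = 1, initialize() above produces
//
//   inc_strided_ = 128 * 8 * 16 / 8                   = 2048 bytes
//   inc_advance_ = 32 * 128 * 16 / 8                  = 8192 bytes
//   inc_next_    = 8192 - (4 - 1) * 8 * 128 * 16 / 8  = 2048 bytes
//
// The values are assumptions chosen to make the arithmetic easy to follow; the
// formulas themselves are the ones implemented in initialize().
//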
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 10,243 | C | 34.324138 | 101 | 0.647857 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing storing of tiles from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in length.
static int const kAccessSizeInBits = 128;
static_assert(
sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
/// Underlying iterator to compute the addresses
using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout,
kAdvanceRank, ThreadMap>;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: address_iterator_(ref, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
address_iterator_.add_tile_offset({0, 1});
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
address_iterator_.add_tile_offset(coord);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, Index byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
frag_ptr[access_idx] = *access_ptr;
++address_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, Index byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
*access_ptr = frag_ptr[access_idx];
++address_iterator_;
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_byte_offset(frag, 0);
}
};
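//
// Usage sketch (hypothetical helper added for illustration; not part of the original
// header). It shows the typical life cycle of the congruous iterator above: construct
// from a TensorRef to the staged tile, load a Fragment, then advance. The Iterator
// template parameter is assumed to be an instantiation of the specialization above
// whose ThreadMap satisfies the 128-bit access requirement.
//
template <typename Iterator>
CUTLASS_DEVICE void example_load_congruous_tile(
    typename Iterator::TensorRef ref,       ///< reference to the staged tile
    int thread_id,                          ///< id of the participating thread
    typename Iterator::Fragment &frag) {    ///< destination fragment
  Iterator iterator(ref, thread_id);
  iterator.load(frag);  // one 128-bit access per (contiguous, strided) iteration
  ++iterator;           // advance one tile along the strided dimension ({0, 1})
}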
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element))>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
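//
// Usage sketch (hypothetical helper for illustration). The column-major adapter above
// only re-expresses coordinates, so a (row, column) tile offset is forwarded to the
// underlying pitch-linear iterator as (contiguous, strided). The helper stores a
// fragment into a neighboring tile; Iterator is assumed to be an instantiation of this
// specialization.
//
template <typename Iterator>
CUTLASS_DEVICE void example_store_column_major_tile(
    typename Iterator::TensorRef ref,
    int thread_id,
    typename Iterator::Fragment const &frag,
    typename Iterator::TensorCoord tile_offset) {   // offset in units of whole tiles
  Iterator iterator(ref, thread_id);
  iterator.add_tile_offset(tile_offset);
  iterator.store(frag);
}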
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element_))>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous<
sizeof_bits<Element_>::value, int(128 / sizeof(Element))>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value,
int(128 / sizeof(Element))>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
): iterator_({ref.data(), ref.stride()}, thread_id) {
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag,
Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
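//
// Usage sketch (hypothetical helper for illustration). For the row-major adapter the
// underlying pitch-linear iterator sees rows and columns exchanged, which is why the
// advance rank is flipped and add_tile_offset() forwards {column, row}. The helper
// writes a fragment into the tile one row below the initial position.
//
template <typename Iterator>
CUTLASS_DEVICE void example_store_next_row_tile(
    typename Iterator::TensorRef ref,
    int thread_id,
    typename Iterator::Fragment const &frag) {
  Iterator iterator(ref, thread_id);
  iterator.add_tile_offset({1, 0});   // one whole tile along the row dimension
  iterator.store(frag);
}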
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileIterator<Shape_, Element_,
layout::TensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment =
Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
/// Underlying iterator to compute the addresses
using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout,
kAdvanceRank, ThreadMap>;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: address_iterator_(ref, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
address_iterator_.add_tile_offset(coord);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
address_iterator_.set_iteration_index(0);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset);
++address_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, Index byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
*access_ptr = frag_ptr[access_idx];
++address_iterator_;
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
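//
// Usage sketch (hypothetical helper for illustration). The crosswise iterator advances
// along the contiguous dimension ({1, 0}) when incremented, which is how a mainloop
// typically walks the K dimension of an operand staged in shared memory. The helper
// loads two consecutive k-tiles; Iterator is assumed to instantiate this specialization.
//
template <typename Iterator>
CUTLASS_DEVICE void example_load_two_k_tiles(
    typename Iterator::TensorRef ref,
    int thread_id,
    typename Iterator::Fragment &frag0,
    typename Iterator::Fragment &frag1) {
  Iterator iterator(ref, thread_id);
  iterator.load(frag0);
  ++iterator;           // {1, 0}: step one tile along the contiguous dimension
  iterator.load(frag1);
}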
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileIterator<Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
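//
// Usage sketch (hypothetical helper for illustration). Pointer offsets are expressed in
// units of Element regardless of the swizzled layout, so switching to the second stage
// of a double-buffered shared-memory allocation is a single add_pointer_offset() call.
// The elements_per_buffer argument is a hypothetical parameter.
//
template <typename Iterator>
CUTLASS_DEVICE void example_advance_to_next_buffer(
    Iterator &iterator,
    typename Iterator::LongIndex elements_per_buffer) {
  iterator.add_pointer_offset(elements_per_buffer);
}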
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment, int Crosswise>
class RegularTileIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCrosswise<
sizeof_bits<Element_>::value, Crosswise>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value,
Crosswise>,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
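//
// Usage sketch (hypothetical helper for illustration). As with the other row-major
// adapters, tile offsets are given in (row, column) order and forwarded transposed to
// the pitch-linear iterator. The helper loads the tile one column to the right of the
// iterator's initial position.
//
template <typename Iterator>
CUTLASS_DEVICE void example_load_next_column_tile(
    typename Iterator::TensorRef ref,
    int thread_id,
    typename Iterator::Fragment &frag) {
  Iterator iterator(ref, thread_id);
  iterator.add_tile_offset({0, 1});   // one whole tile along the column dimension
  iterator.load(frag);
}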
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for k interleaved arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int InterleavedK, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value,
InterleavedK>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value,
InterleavedK>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128bs");
};
private:
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
public:
/// Fragment object to be loaded or stored
using Fragment =
Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>;
/// Underlying iterator to compute the addresses
using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout,
kAdvanceRank, ThreadMap>;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: address_iterator_(ref, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
address_iterator_.add_pointer_offset(Shape::kCount);
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
address_iterator_.add_pointer_offset(coord.contiguous() * Shape::kCount);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
address_iterator_.set_iteration_index(0);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset);
++address_iterator_;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
int access_idx = c + s * ThreadMap::Iterations::kContiguous;
*(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx];
++address_iterator_;
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
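//
// Usage sketch (hypothetical helper for illustration). Each increment advances the
// underlying pointer by Shape::kCount elements, i.e. by one whole interleaved tile, so
// a sequence of tiles can be walked with a simple unrolled loop. The compile-time
// Tiles count is a hypothetical parameter.
//
template <typename Iterator, int Tiles>
CUTLASS_DEVICE void example_load_interleaved_tiles(
    typename Iterator::TensorRef ref,
    int thread_id,
    typename Iterator::Fragment (&frags)[Tiles]) {
  Iterator iterator(ref, thread_id);
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < Tiles; ++i) {
    iterator.load(frags[i]);
    ++iterator;   // advances the pointer by Shape::kCount elements
  }
}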
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for k interleaved arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int InterleavedK, int Alignment>
class RegularTileIterator<
Shape_, Element_,
layout::TensorOpMultiplicandColumnMajorInterleaved<sizeof_bits<Element_>::value,
InterleavedK>,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout =
layout::TensorOpMultiplicandColumnMajorInterleaved<sizeof_bits<Element_>::value,
InterleavedK>;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileIterator<
cutlass::MatrixShape<Shape::kColumn, Shape::kRow>,
Element,
layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>,
(kAdvanceRank == 1 ? 0 : 1),
ThreadMap
>;
public:
/// Fragment object to be loaded or stored
using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileIterator operator++(int) {
RegularTileIterator prev(*this);
++iterator_;
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.strided(), coord.contiguous()});
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
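//
// Usage sketch (hypothetical helper for illustration). A store followed by a load
// through the same iterator type round-trips a fragment, which is a convenient smoke
// test when bringing up a new interleaved configuration. Synchronization between
// cooperating threads is omitted for brevity.
//
template <typename Iterator>
CUTLASS_DEVICE void example_store_then_load(
    typename Iterator::TensorRef ref,
    int thread_id,
    typename Iterator::Fragment const &src,
    typename Iterator::Fragment &dst) {
  Iterator store_iterator(ref, thread_id);
  store_iterator.store(src);
  Iterator load_iterator(ref, thread_id);
  load_iterator.load(dst);
}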
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 36,050 | C | 31.537004 | 116 | 0.655201 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/thread/matrix.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a matrix object intended for storing data in registers and operations within
a CUDA thread.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_coord.h"
namespace cutlass {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Per-thread matrix object storing a packed matrix
template <
  typename Element_,
  int Rows,
  int Columns,
  typename Layout_ = layout::RowMajor
>
class Matrix : public Array<Element_, Rows * Columns> {
public:
  // Verify layout refers to a rank=2 matrix.
  static_assert(
    Layout_::kRank == 2,
    "Layout type must refer to a rank=2 matrix");
  /// Base type
  using Base = Array<Element_, Rows * Columns>;
  /// Element type
  using Element = Element_;
  /// Number of rows
  static int const kRows = Rows;
  /// Number of columns
  static int const kColumns = Columns;
  /// Layout within the array
  using Layout = Layout_;
/// Reference type to an element
using Reference = Element &;
/// Logical rank of tensor index space
static int const kRank = 2;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Stride type
using Stride = typename Layout::Stride;
/// TensorRef to matrix object
  using TensorRef = TensorRef<Element, Layout>;
/// TensorRef to constant matrix object
using ConstTensorRef = typename TensorRef::ConstTensorRef;
  /// TensorView to matrix object
  using TensorView = TensorView<Element, Layout>;
  /// TensorView to constant matrix object
using ConstTensorView = typename TensorView::ConstTensorView;
/// Diagonal vector
using Diagonal = Vector<Element, __NV_STD_MIN(kRows, kColumns)>;
private:
public:
//
// Methods
//
/// Returns the size of the object
CUTLASS_HOST_DEVICE
static MatrixCoord extent() {
return make_Coord(kRows, kColumns);
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
static Layout layout() {
return Layout::packed(extent());
}
/// Ctor
CUTLASS_HOST_DEVICE
Matrix() { }
/// Ctor
CUTLASS_HOST_DEVICE
Matrix(Diagonal const &diag) {
// Todo - construct from diagonal
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
TensorRef ref() {
return TensorRef(this->data(), layout());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(this->data(), layout());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
TensorView view() {
return TensorView(ref(), extent());
}
/// Returns a TensorView to const data
CUTLASS_HOST_DEVICE
ConstTensorView const_view() const {
return ConstTensorView(const_ref(), extent());
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
  Reference at(MatrixCoord const& coord) {
typename Base::size_type offset_(layout().offset(coord));
return Base::at(offset_);
}
/// Returns the number of scalar elements needed to store tensor.
CUTLASS_HOST_DEVICE
LongIndex capacity() const {
return LongIndex(Base::size());
}
};
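//
// Usage sketch (hypothetical function for illustration; not part of the original
// header). It fills a small per-thread matrix through its packed layout and reads one
// element back. The 3x4 extent and float element type are assumptions chosen for
// illustration.
//
CUTLASS_HOST_DEVICE
float example_fill_and_read() {
  Matrix<float, 3, 4> m;                       // row-major by default
  for (int r = 0; r < 3; ++r) {
    for (int c = 0; c < 4; ++c) {
      m[m.layout().offset({r, c})] = float(r * 4 + c);
    }
  }
  return m[m.layout().offset({2, 1})];         // element at row 2, column 1
}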
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Column vector defined as a matrix with exactly one column
template <
typename Element,
int Rows,
typename Layout = layout::ColumnMajor
>
using ColumnVector = Matrix<Element, Rows, 1, Layout>;
/// Row vector defined as a matrix with exactly one row
template <
typename Element,
int Columns,
typename Layout = layout::RowMajor
>
using RowVector = Matrix<Element, 1, Columns, Layout>;
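//
// Usage sketch (hypothetical function for illustration). The aliases above are ordinary
// Matrix instantiations, so a dot product can be written directly over their underlying
// arrays. The length of 4 and float element type are assumptions chosen for
// illustration.
//
CUTLASS_HOST_DEVICE
float example_dot(ColumnVector<float, 4> const &a, RowVector<float, 4> const &b) {
  float sum = 0;
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < 4; ++i) {
    sum += a[i] * b[i];
  }
  return sum;
}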
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace cutlass
| 5,931 | C | 28.66 | 100 | 0.658742 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/reduction/thread/reduction_operators.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over densely packed tensors in global memory
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mixed-precision reduction
template <
typename ElementAccumulator_,
typename Element_,
int Count = 1
>
struct ReduceAdd {
//
// Type definitions
//
using ElementAccumulator = ElementAccumulator_;
using Element = Element_;
static int const kCount = Count;
using FragmentAccumulator = cutlass::Array<ElementAccumulator, kCount>;
using FragmentElement = cutlass::Array<Element, kCount>;
struct Params { };
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
ReduceAdd(Params params_ = Params()): params(params_) { }
/// Operator
CUTLASS_HOST_DEVICE
FragmentAccumulator operator()(
FragmentAccumulator accumulator,
FragmentElement element) const {
plus<FragmentAccumulator> op;
NumericArrayConverter<
ElementAccumulator,
Element,
kCount,
PreferredRoundingMode<ElementAccumulator, Element>::kRound> converter;
return op(accumulator, converter(element));
}
};
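//
// Usage sketch (hypothetical function for illustration; not part of the original
// header). It applies ReduceAdd to accumulate a fragment of half_t values into a
// float accumulator fragment. The fragment width of 8 is an assumption chosen for
// illustration.
//
CUTLASS_HOST_DEVICE
cutlass::Array<float, 8> example_reduce_add(
    cutlass::Array<float, 8> const &accumulator,
    cutlass::Array<cutlass::half_t, 8> const &element) {
  ReduceAdd<float, cutlass::half_t, 8> reduce_op;
  return reduce_op(accumulator, element);
}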
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Special handling for binary operators
template <typename ReductionOp, typename Element, int N>
struct VectorizeArrayOperation {
using ValueType = Array<Element, N>;
CUTLASS_HOST_DEVICE
ValueType operator()(
ReductionOp const &reduction_op,
ValueType const &lhs,
ValueType const &rhs) const {
ValueType result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = reduction_op(lhs[i], rhs[i]);
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ReductionOp, typename Element, int N>
struct ReduceArrayOperation {
using ArrayType = Array<Element, N>;
CUTLASS_HOST_DEVICE
Element operator()(
ReductionOp const &reduction_op,
ArrayType const &array) const {
Element item = reduction_op(array[0], array[1]);
CUTLASS_PRAGMA_UNROLL
for (int i = 2; i < N; ++i) {
item = reduction_op(item, array[i]);
}
return item;
}
};
template <int N>
struct ReduceArrayOperation<logical_and<uint1b_t>, uint1b_t, N> {
using ArrayType = Array<uint1b_t, N>;
CUTLASS_HOST_DEVICE
uint1b_t operator()(
logical_and<uint1b_t> const &reduction_op,
ArrayType const &array) const {
uint8_t const *ptr = reinterpret_cast<uint8_t const *>(&array);
bool item = false;
CUTLASS_PRAGMA_UNROLL
for (int byte = 0; byte < (N + 7) / 8; ++byte) {
uint8_t bits = ptr[byte];
item = (item || !bits);
}
return uint1b_t(!item);
}
};
template <int N>
struct ReduceArrayOperation<logical_or<uint1b_t>, uint1b_t, N> {
using ArrayType = Array<uint1b_t, N>;
CUTLASS_HOST_DEVICE
uint1b_t operator()(
    logical_or<uint1b_t> const &reduction_op,
ArrayType const &array) const {
uint8_t const *ptr = reinterpret_cast<uint8_t const *>(&array);
bool item = true;
CUTLASS_PRAGMA_UNROLL
for (int byte = 0; byte < (N + 7) / 8; ++byte) {
uint8_t bits = ptr[byte];
item = (item || bits);
}
return uint1b_t(item);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper function to infer template argument types
template <typename ReductionOp, typename Element, int N>
CUTLASS_HOST_DEVICE
Array<Element, N> ApplyArrayOperator(
ReductionOp const &reduction_op,
Array<Element, N> const &lhs,
Array<Element, N> const &rhs) {
VectorizeArrayOperation<ReductionOp, Element, N> vectorize_op;
return vectorize_op(reduction_op, lhs, rhs);
}
/// Helper to reduce an array
template <typename ReductionOp, typename Element, int N>
CUTLASS_HOST_DEVICE
Element ReduceArray(ReductionOp const &reduction_op, Array<Element, N> const &array) {
ReduceArrayOperation<ReductionOp, Element, N> reduce_array_op;
return reduce_array_op(reduction_op, array);
}
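//
// Usage sketch (hypothetical function for illustration). Two partial-result arrays are
// first combined elementwise, then the surviving array is collapsed to a single scalar.
// The plus<float> functor and array width of 4 are assumptions chosen for illustration.
//
CUTLASS_HOST_DEVICE
float example_reduce_partials(
    Array<float, 4> const &lhs,
    Array<float, 4> const &rhs) {
  plus<float> reduction_op;
  Array<float, 4> combined = ApplyArrayOperator(reduction_op, lhs, rhs);
  return ReduceArray(reduction_op, combined);
}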
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,790 | C | 27.775424 | 100 | 0.602504 |