Dataset schema: text (string, lengths 27 to 947k), id (string, lengths 18 to 126), metadata (dict), __index_level_0__ (int64, 0 to 80)
################################################################################ # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ Basic example of using the CUTLASS Python interface to run a 2d convolution """ import sys print("This example is deprecated. Please see examples/python for examples of using " "the CUTLASS Python interface.") sys.exit(0) import argparse import numpy as np import torch import cutlass_bindings import cutlass.backend as pycutlass from cutlass.backend import * from cutlass.backend.utils.reference_model import Conv2dReferenceModule from cutlass.backend.utils.device import device_cc parser = argparse.ArgumentParser( description=("Launch a 2d convolution kernel from Python. " "See https://docs.nvidia.com/deeplearning/performance/dl-performance-convolutional/index.html#convo-intro for notation.")) parser.add_argument("--n", default=1, type=int, help="N dimension of the convolution") parser.add_argument("--c", default=64, type=int, help="C dimension of the convolution") parser.add_argument("--h", default=32, type=int, help="H dimension of the convolution") parser.add_argument("--w", default=32, type=int, help="W dimension of the convolution") parser.add_argument("--k", default=32, type=int, help="N dimension of the convolution") parser.add_argument("--r", default=3, type=int, help="R dimension of the convolution") parser.add_argument("--s", default=3, type=int, help="S dimension of the convolution") parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel") try: args = parser.parse_args() except: sys.exit(0) # Check that the device is of a sufficient compute capability cc = device_cc() assert cc >= 70, "The CUTLASS Python Conv2d example requires compute capability greater than or equal to 70." 
alignment = 1 np.random.seed(0) # Allocate a pool of device memory to be used by the kernel pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32) # Set the compiler to use to NVCC pycutlass.compiler.nvcc() # Set up A, B, C and accumulator A = TensorDescription(cutlass_bindings.float16, cutlass_bindings.TensorNHWC, alignment) B = TensorDescription(cutlass_bindings.float16, cutlass_bindings.TensorNHWC, alignment) C = TensorDescription(cutlass_bindings.float32, cutlass_bindings.TensorNHWC, alignment) element_acc = cutlass_bindings.float32 element_epilogue = cutlass_bindings.float32 # Select instruction shape based on the Tensor Core instructions supported # by the device on which we are running if cc == 70: instruction_shape = [8, 8, 4] elif cc == 75: instruction_shape = [16, 8, 8] else: # Use CUTLASS kernels for CC 80 by default (e.g., for cases in which SM86 is used) cc = 80 instruction_shape = [16, 8, 16] math_inst = MathInstruction( instruction_shape, A.element, B.element, element_acc, cutlass_bindings.OpClass.TensorOp, MathOperation.multiply_add ) tile_description = TileDescription( [128, 128, 32], # Threadblock shape 2, # Number of stages [2, 2, 1], # Number of warps within each dimension of the threadblock shape math_inst ) epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue) operation = Conv2dOperation( conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized, arch=cc, tile_description=tile_description, A=A, B=B, C=C, stride_support=StrideSupport.Strided, epilogue_functor=epilogue_functor ) if args.print_cuda: print(operation.rt_module.emit()) operations = [operation, ] # Compile the operation pycutlass.compiler.add_module(operations) # Randomly initialize tensors problem_size = cutlass_bindings.conv.Conv2dProblemSize( cutlass_bindings.Tensor4DCoord(args.n, args.h, args.c, args.w), cutlass_bindings.Tensor4DCoord(args.k, args.r, args.s, args.c), cutlass_bindings.Tensor4DCoord(0, 0, 0, 0), # Padding cutlass_bindings.MatrixCoord(1, 1), # Strides cutlass_bindings.MatrixCoord(1, 1), # Dilation cutlass_bindings.conv.Mode.cross_correlation, 1, # Split k slices 1 # Groups ) tensor_A_size = cutlass_bindings.conv.implicit_gemm_tensor_a_size(operation.conv_kind, problem_size) tensor_B_size = cutlass_bindings.conv.implicit_gemm_tensor_b_size(operation.conv_kind, problem_size) tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size(operation.conv_kind, problem_size) tensor_A = torch.ceil(torch.empty(size=(tensor_A_size,), dtype=torch.float16, device="cuda").uniform_(-8.5, 7.5)) tensor_B = torch.ceil(torch.empty(size=(tensor_B_size,), dtype=torch.float16, device="cuda").uniform_(-8.5, 7.5)) tensor_C = torch.ceil(torch.empty(size=(tensor_C_size,), dtype=torch.float32, device="cuda").uniform_(-8.5, 7.5)) tensor_D = torch.ones(size=(tensor_C_size,), dtype=torch.float32, device="cuda") alpha = 1. beta = 0. 
arguments = Conv2dArguments( operation=operation, problem_size=problem_size, A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D, output_op=operation.epilogue_type(alpha, beta) ) # Run the operation operation.run(arguments) arguments.sync() # Run the host reference module and compare to the CUTLASS result reference = Conv2dReferenceModule(A, B, C, operation.conv_kind) tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta) try: assert torch.equal(tensor_D, tensor_D_ref) except: assert torch.allclose(tensor_D, tensor_D_ref, rtol=1e-2) print("Passed.")
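The example above validates the device result against Conv2dReferenceModule. As an additional illustration of what the fprop kernel computes, the snippet below is a minimal stand-alone sketch (not part of the example and not the CUTLASS reference module) that reproduces D = alpha * conv(A, B) + beta * C on the host with torch.nn.functional.conv2d, assuming the flat NHWC activation / KRSC filter buffers, zero padding, unit stride, and unit dilation used above; the helper name and signature are hypothetical.

```python
import torch
import torch.nn.functional as F

def conv2d_fprop_reference(tensor_A, tensor_B, tensor_C,
                           n, h, w, c, k, r, s, alpha=1.0, beta=0.0):
    """Host-side fprop check assuming flat NHWC (A, C/D) and KRSC (B) buffers."""
    # Un-flatten the 1-D device buffers and promote to fp32 to mirror the
    # fp32 accumulator and epilogue configured for the kernel above.
    A = tensor_A.view(n, h, w, c).float()
    B = tensor_B.view(k, r, s, c).float()
    # F.conv2d computes cross-correlation on NCHW activations and KCRS filters,
    # matching cutlass_bindings.conv.Mode.cross_correlation.
    out = F.conv2d(A.permute(0, 3, 1, 2),    # NHWC -> NCHW
                   B.permute(0, 3, 1, 2))    # KRSC -> KCRS
    p, q = h - r + 1, w - s + 1              # output extent with no padding
    D = out.permute(0, 2, 3, 1).reshape(-1)  # (N, K, P, Q) -> NPQK, flattened
    return alpha * D + beta * tensor_C.view(n, p, q, k).reshape(-1).float()
```

A torch.allclose comparison of this result against tensor_D would then mirror the check performed at the end of the example.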
cutlass/examples/40_cutlass_py/conv2d.py/0
{ "file_path": "cutlass/examples/40_cutlass_py/conv2d.py", "repo_id": "cutlass", "token_count": 2545 }
8
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Scheduler for grouped FMHA */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/gemm/kernel/grouped_problem_visitor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { // Helper for correctly representing problem sizes in grouped kernels template <typename ThreadblockShape> struct FMHAGroupedProblemSizeHelper { CUTLASS_HOST_DEVICE static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) { // FMHA only partitions tiles across the M dimension. 
return cutlass::gemm::GemmCoord( ((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM), 1, 1); } CUTLASS_HOST_DEVICE static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {} CUTLASS_HOST_DEVICE static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) { return grid.m() * grid.n(); } }; } // namespace detail /// Visitor class to abstract away the algorithm for iterating over tiles template <typename ThreadblockShape, GroupScheduleMode GroupScheduleMode_, int PrefetchTileCount, int ThreadCount, bool Transposed = false> struct FMHAGroupedProblemVisitor : public GroupedProblemVisitor< detail::FMHAGroupedProblemSizeHelper<ThreadblockShape>, ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount> { using ProblemSizeHelper = detail::FMHAGroupedProblemSizeHelper<ThreadblockShape>; using Base = GroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount>; using BaseParams = typename Base::Params; using SharedStorage = typename Base::SharedStorage; cutlass::gemm::GemmCoord const *problem_sizes0; cutlass::gemm::GemmCoord const *problem_sizes1; struct Params { cutlass::gemm::GemmCoord const *problem_sizes0; cutlass::gemm::GemmCoord const *problem_sizes1; int32_t problem_count; void const *workspace; int32_t tile_count; // // Methods // /// Ctor CUTLASS_HOST_DEVICE Params(): problem_sizes0(nullptr), problem_sizes1(nullptr), problem_count(0), workspace(nullptr), tile_count(0) { } /// Ctor CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmCoord const *problem_sizes0, cutlass::gemm::GemmCoord const *problem_sizes1, int32_t problem_count, void const *workspace = nullptr, int32_t tile_count = 0 ): problem_sizes0(problem_sizes0), problem_sizes1(problem_sizes1), problem_count(problem_count), workspace(workspace), tile_count(tile_count) {} /// Convert the FMHA-specific parameters to those used by the base class CUTLASS_HOST_DEVICE BaseParams to_base() const { return BaseParams(// Set problem_sizes as problem_sizes1 because these determine // shape of the final output of FMHA problem_sizes1, problem_count, workspace, tile_count); } }; // // Methods // CUTLASS_DEVICE FMHAGroupedProblemVisitor( Params const &params_, SharedStorage &shared_storage_, int32_t block_idx ): Base ( params_.to_base(), shared_storage_, block_idx), problem_sizes0(params_.problem_sizes0), problem_sizes1(params_.problem_sizes1) {} /// Returns the problem size 0 for the current problem CUTLASS_HOST_DEVICE cutlass::gemm::GemmCoord problem_size0() const { GemmCoord problem = problem_sizes0[this->problem_idx]; ProblemSizeHelper::possibly_transpose_problem(problem); return problem; } /// Returns the problem size 1 for the current problem CUTLASS_HOST_DEVICE cutlass::gemm::GemmCoord problem_size1() const { GemmCoord problem = problem_sizes1[this->problem_idx]; ProblemSizeHelper::possibly_transpose_problem(problem); return problem; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
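The helper above partitions each grouped problem only along M (a ceiling division by ThreadblockShape::kM, with N and K fixed to 1), and Params::to_base forwards problem_sizes1 because those shapes determine the final FMHA output. The following is a minimal host-side sketch of that tile bookkeeping in plain Python with illustrative names; it is not CUTLASS code.

```python
def grid_shape(m, threadblock_m):
    # Mirrors ((problem.m() - 1 + kM) / kM): ceiling division over M only,
    # with the N and K grid dimensions fixed to 1.
    return ((m + threadblock_m - 1) // threadblock_m, 1, 1)

def total_tiles(problem_sizes1, threadblock_m):
    # problem_sizes1 drives scheduling because it fixes the output shape,
    # as in Params::to_base above; grid.n() is always 1 here, so the per-problem
    # tile count reduces to grid.m().
    return sum(grid_shape(m, threadblock_m)[0] for (m, n, k) in problem_sizes1)

# Example: three variable-length attention problems, 64-row threadblock tiles.
problems = [(128, 64, 64), (200, 64, 64), (32, 64, 64)]
print(total_tiles(problems, 64))  # 2 + 4 + 1 = 7 tiles shared across the group
```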
cutlass/examples/41_fused_multi_head_attention/fmha_grouped_problem_visitor.h/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/fmha_grouped_problem_visitor.h", "repo_id": "cutlass", "token_count": 2448 }
9
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. This iterator uses masks to guard out-of-bounds accesses. The first tile this iterator visits maybe partial, then the remaining tiles are complete. So, we only need to compute the predicates twice, once before the first tile and once for the remaining full tiles which can share the same predicates. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. */ #pragma once #include "cutlass/arch/memory.h" #include "cutlass/transform/threadblock/predicated_tile_access_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedTileIteratorResidualLast /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// /// Regular tile iterator using a precomputed control structure to minimize /// register liveness and integer arithmetic. /// /// Layout is assumed to be invariant at the time the precomputed "Params" /// object is constructed. /// /// Base pointer and tensor extents may be specified at the time the iterator is /// constructed. Subsequently, they are assumed to be immutable. /// /// Adding a logical coordinate offset may be performed at the time the iterator /// is constructed. Subsequent additions to logical coordinate offset may be /// performed but are relatively expensive. 
/// /// Visitation order is intended to first visit a "residual" tile that may be /// partially full in both the advance dimension and the steady-state dimension. /// This is assumed to be the last tile in the iteration sequence. Advancing an /// iterator that has just been constructed moves to the first tile that is full /// in the advance dimension and recomputes predicates. Subsequent accesses may /// be performed without updating internal predicates and are efficient in terms /// of live register state and pointer arithmetic instructions. /// /// To be efficient, this assumes the iterator will be dereferenced and advanced /// at least once outside any looping structure to minimize integer arithmetic. /// /// Acceses out of bounds are safe so long as `clear_mask()` is called prior to /// dereferencing the iterator. /// /// /// Example: /// /// An efficient pipeline structure may be constructed as follows: /// // template <typename Iterator> // __global__ void kernel( // typename Iterator::Params params, // typename Iterator::Element *ptr, // TensorCoord extent) { // // typename Iterator::Fragment fragment; // // TensorCoord threadblock_offset(0, 0); // // Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offsets); // // // fragment = *iter; // load "residue" tile first // ++iter; // advance to first "steady state" tile and update // internal masks // // // #pragma unroll // for (int i = Remaining - 1; i >= 0; --i) { // // f(fragment); // // if (!i) { // iter.clear_mask(); // light-weight operation to clear masks - // subsequent loads become NO-OPs. // } // // fragment = *iter; // load tile during "steady state" phase // ++iter; // advance to next tile - lightweight due to // steady-state masks // } // } // // void host(TensorView<Element, 2, layout::PitchLinear> view) { // // using Iterator = // transform::threadblock::PredicatedTileIteratorResidualLast; // // typename Iterator::Params params(view.layout()); // // kernel<Iterator>(params, view.data()); // } /// /// template < typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, int AccessSize = ThreadMap::kElementsPerAccess, bool Gather = false> class PredicatedTileIteratorResidualLast; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize, bool Gather> class PredicatedTileIteratorResidualLast< Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, AccessSize, Gather> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; /// Type used for internal memory accesses using AccessType = AlignedArray< Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>; /// Underlying iterator to compute the addresses using TileAccessIterator = PredicatedTileAccessIteratorResidualLast< Shape, Element, Layout, kAdvanceRank, ThreadMap, AccessType, Gather>; static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; /// Predicate vector stores mask to guard accesses using Mask = typename TileAccessIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { public: using Base = typename TileAccessIterator::Params::Base; friend PredicatedTileIteratorResidualLast; private: /// Parameters object typename TileAccessIterator::Params params_; public: /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout) {} CUTLASS_HOST_DEVICE Params() {} CUTLASS_HOST_DEVICE Params(Base const& base) : params_(base) {} }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char*; private: // // Data members // /// Data member to the tile access iterator TileAccessIterator address_iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( /// Precomputed parameters object Params const& params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const& threadblock_offset, /// Gather indices int const* indices = nullptr) : address_iterator_( params.params_, pointer, extent, thread_id, threadblock_offset, indices) {} /// Construct a PredicatedTileIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void 
add_pointer_offset(LongIndex pointer_offset) { address_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast& operator++() { if (kAdvanceRank) address_iterator_.add_tile_offset({0, 1}); else address_iterator_.add_tile_offset({1, 0}); return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast operator++(int) { PredicatedTileIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { address_iterator_.set_residual_tile(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { address_iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { address_iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { address_iterator_.get_mask(mask); } CUTLASS_DEVICE void load_with_pointer_offset(Fragment& frag, Index pointer_offset) { load_with_byte_offset( frag, pointer_offset * sizeof_bits<Element>::value / 8); } CUTLASS_DEVICE void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) { AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); address_iterator_.set_iteration_index(idx); char const* byte_ptr = reinterpret_cast<char const*>(address_iterator_.get()) + byte_offset; AccessType const* access_ptr = reinterpret_cast<AccessType const*>(byte_ptr); cutlass::arch::global_load<AccessType, sizeof(AccessType)>( frag_ptr[idx], access_ptr, address_iterator_.valid()); ++address_iterator_; } } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment& frag) { load_with_byte_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) { store_with_byte_offset( frag, pointer_offset * sizeof_bits<Element>::value / 8); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) { address_iterator_.set_iteration_index(0); AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); char* byte_ptr = 
reinterpret_cast<char*>(address_iterator_.get()) + byte_offset; AccessType* access_ptr = reinterpret_cast<AccessType*>(byte_ptr); if (address_iterator_.valid()) { *access_ptr = frag_ptr[idx]; } ++address_iterator_; } } } } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const& frag) { store_with_byte_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize, bool Gather> class PredicatedTileIteratorResidualLast< Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessSize, Gather> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingIterator = PredicatedTileIteratorResidualLast< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize, Gather>; using AccessType = typename UnderlyingIterator::AccessType; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id, ///< ID of each participating thread TensorCoord const& threadblock_offset, ///< Initial offset of threadblock int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord( threadblock_offset.row(), threadblock_offset.column()), indices) {} /// Construct a PredicatedTileIteratorResidualLast with zero threadblock 
/// offset CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast operator++(int) { PredicatedTileIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment& frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) { iterator_.store_with_byte_offset(frag, byte_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize, bool Gather> class PredicatedTileIteratorResidualLast< Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessSize, Gather> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingIterator = PredicatedTileIteratorResidualLast< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessSize, Gather>; using AccessType = typename UnderlyingIterator::AccessType; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id, ///< ID of each participating thread TensorCoord const& threadblock_offset, ///< Initial offset of threadblock int const* indices = nullptr ///< Gather indices ) : iterator_( params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord( threadblock_offset.column(), threadblock_offset.row()), indices) {} /// Construct a PredicatedTileIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. 
/// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast operator++(int) { PredicatedTileIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment& frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) { iterator_.store_with_byte_offset(frag, byte_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileIteratorResidualLast for affine rank-2 data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize> class PredicatedTileIteratorResidualLast< Shape_, Element_, layout::AffineRankN<2>, AdvanceRank, ThreadMap_, AccessSize, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRankN<2>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; /// Type used for internal memory accesses using AccessType = AlignedArray< Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>; /// Underlying iterator to compute the addresses using TileAccessIterator = PredicatedTileAccessIteratorResidualLast< Shape, Element, Layout, kAdvanceRank, ThreadMap, AccessType>; static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; /// Predicate vector stores mask to guard accesses using Mask = typename TileAccessIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { public: friend PredicatedTileIteratorResidualLast; private: /// Parameters object typename TileAccessIterator::Params params_; public: /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout) {} CUTLASS_HOST_DEVICE Params() {} }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char*; private: // // Data members // /// Data member to the tile access iterator TileAccessIterator address_iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( /// Precomputed parameters object Params const& params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : address_iterator_( params.params_, pointer, extent, thread_id, threadblock_offset) {} /// Construct a PredicatedTileIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { 
address_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast& operator++() { if (kAdvanceRank) address_iterator_.add_tile_offset(make_Coord(0, 1)); else address_iterator_.add_tile_offset(make_Coord(1, 0)); return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast operator++(int) { PredicatedTileIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { address_iterator_.set_residual_tile(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { address_iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { address_iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { address_iterator_.get_mask(mask); } CUTLASS_DEVICE void load_with_pointer_offset(Fragment& frag, Index pointer_offset) { load_with_byte_offset( frag, pointer_offset * sizeof_bits<Element>::value / 8); } CUTLASS_DEVICE void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) { AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); address_iterator_.set_iteration_index(idx); char const* byte_ptr = reinterpret_cast<char const*>(address_iterator_.get()) + byte_offset; AccessType const* access_ptr = reinterpret_cast<AccessType const*>(byte_ptr); cutlass::arch::global_load<AccessType, sizeof(AccessType)>( frag_ptr[idx], access_ptr, address_iterator_.valid()); ++address_iterator_; } } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment& frag) { load_with_byte_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) { store_with_byte_offset( frag, pointer_offset * sizeof_bits<Element>::value / 8); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) { address_iterator_.set_iteration_index(0); AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); char* byte_ptr = reinterpret_cast<char*>(address_iterator_.get()) + byte_offset; 
AccessType* access_ptr = reinterpret_cast<AccessType*>(byte_ptr); if (address_iterator_.valid()) { *access_ptr = frag_ptr[idx]; } ++address_iterator_; } } } } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const& frag) { store_with_byte_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileIteratorResidualLast for affine rank 2 /// column-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize> class PredicatedTileIteratorResidualLast< Shape_, Element_, layout::AffineRank2ColumnMajor, AdvanceRank, ThreadMap_, AccessSize, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRank2ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; // Map to the underlying AffineRankN<2> layout using UnderlyingIterator = PredicatedTileIteratorResidualLast< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::AffineRankN<2>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize>; using AccessType = typename UnderlyingIterator::AccessType; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given an AffineRankN<2> tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))) {} }; private: // // Data members // /// Underlying AffineRankN<2> tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id, ///< ID of each participating thread TensorCoord const& threadblock_offset, ///< Initial offset of threadblock int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord( threadblock_offset.row(), threadblock_offset.column())) {} /// Construct a PredicatedTileIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params 
const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast operator++(int) { PredicatedTileIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment& frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) { iterator_.store_with_byte_offset(frag, byte_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileIteratorResidualLast for affine rank 2 /// row-major data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize> class PredicatedTileIteratorResidualLast< Shape_, Element_, layout::AffineRank2RowMajor, AdvanceRank, ThreadMap_, AccessSize, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRank2RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; // Map to the underlying AffineRankN<2> layout using UnderlyingIterator = PredicatedTileIteratorResidualLast< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::AffineRankN<2>, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessSize>; using AccessType = typename UnderlyingIterator::AccessType; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given an AffineRankN<2> tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))) {} }; private: // // Data members // /// Underlying AffineRankN<2> tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id, ///< ID of each participating thread TensorCoord const& threadblock_offset, ///< Initial offset of threadblock int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord( threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedTileIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileIteratorResidualLast( params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances 
to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast operator++(int) { PredicatedTileIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment& frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) { iterator_.store_with_byte_offset(frag, byte_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileIteratorResidualLast for interleaved data. /// It is mapped to the congruous layout. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize, int InterleavedK> class PredicatedTileIteratorResidualLast< Shape_, Element_, layout::ColumnMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessSize, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::ColumnMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingIterator = PredicatedTileIteratorResidualLast< layout::PitchLinearShape< Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize>; using AccessType = typename UnderlyingIterator::AccessType; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( /// Precomputed parameters object Params const& params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord( extent.row() * kInterleavedK, extent.column() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.row() * kInterleavedK, threadblock_offset.column() / kInterleavedK)) {} /// Construct a PredicatedTileIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileIteratorResidualLast( params, pointer, 
extent, thread_id, make_Coord(0, 0)) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast operator++(int) { PredicatedTileIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment& frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileIteratorResidualLast for interleaved-32 /// data. It is mapped to the congruous layout. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize, int InterleavedK> class PredicatedTileIteratorResidualLast< Shape_, Element_, layout::RowMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessSize, false> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::RowMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element*; using NonConstPointer = typename platform::remove_const<Element>::type*; using UnderlyingIterator = PredicatedTileIteratorResidualLast< layout::PitchLinearShape< Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessSize>; using AccessType = typename UnderlyingIterator::AccessType; /// Fragment object to be loaded or stored using Fragment = cutlass::Array< Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileIteratorResidualLast; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( /// Precomputed parameters object Params const& params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const& threadblock_offset, int const* indices = nullptr ///< gather/scatter indices, note no support for ///< gather/scatter at this specialization ) : iterator_( params.params_, pointer, layout::PitchLinearCoord( extent.column() * kInterleavedK, extent.row() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.column() * kInterleavedK, threadblock_offset.row() / kInterleavedK)) {} /// Construct a PredicatedTileIteratorResidualLast with zero threadblock /// offset CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast( Params const& params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileIteratorResidualLast( params, pointer, extent, 
thread_id, make_Coord(0, 0)) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast& operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileIteratorResidualLast operator++(int) { PredicatedTileIteratorResidualLast self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } CUTLASS_HOST_DEVICE void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const& mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask& mask) { iterator_.get_mask(mask); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment& frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
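A standalone sketch of the coordinate remapping performed by the three specializations above. It has no CUTLASS dependency; plain ints stand in for the tensor-coordinate types, and the numbers in main() are arbitrary. Each adapter simply rewrites a logical (row, column) extent or offset into the (contiguous, strided) coordinate expected by the underlying pitch-linear iterator, mirroring the constructor bodies shown in the header.

#include <cstdio>

// (contiguous, strided) coordinate as consumed by the underlying pitch-linear iterator
struct PitchLinearCoordSketch { int contiguous, strided; };

// AffineRank2RowMajor adapter: (row, column) -> (column, row)
PitchLinearCoordSketch map_row_major(int row, int column) {
  return {column, row};
}

// ColumnMajorInterleaved<K> adapter: K columns are folded into the contiguous dimension,
// so extents and offsets transform as (row * K, column / K)
PitchLinearCoordSketch map_column_major_interleaved(int row, int column, int K) {
  return {row * K, column / K};
}

// RowMajorInterleaved<K> adapter: K rows are folded into the contiguous dimension
PitchLinearCoordSketch map_row_major_interleaved(int row, int column, int K) {
  return {column * K, row / K};
}

int main() {
  PitchLinearCoordSketch c = map_column_major_interleaved(/*row=*/128, /*column=*/64, /*K=*/32);
  std::printf("contiguous=%d strided=%d\n", c.contiguous, c.strided);  // prints 4096 and 2
  return 0;
}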
cutlass/examples/41_fused_multi_head_attention/iterators/predicated_tile_iterator_residual_last.h
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/iterators/predicated_tile_iterator_residual_last.h", "repo_id": "cutlass", "token_count": 22507 }
10
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Metaprogram for determining the mapping of output elements to threads for epilogue tiles. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/fast_math.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace detail { /// RowArrangement determines how one or more warps cover a region of consecutive rows. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize, bool Is2dTile > struct RowArrangementBiasAct; /// RowArrangement in which each warp's access is a 1D tiled arrangement. 
template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize > struct RowArrangementBiasAct<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, false> { static int const kWarpSize = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; static int const kIterationsRow = 1; static int const kDeltaRow = 1; static int const kIterationsColumn = Shape::kColumn / kElementsPerAccess / kWarpSize; static int const kDeltaColumn = kWarpSize * kElementsPerAccess; static int const kAccessWidth = kWarpSize; static int const kAccessRows = 1; static int const kWarpPartitionsRow = 1; static int const kWarpPartitionsColumn = WarpsRemaining; }; /// RowArrangement in which each warp's access is a 2D tiled arrangement. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize > struct RowArrangementBiasAct<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, true> { static int const kMemoryAccessSize = 4;//128; static int const kWarpSize = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; struct Detail { static int const kShapeRow = Shape::kRow / WarpsRemaining; static int const kShapeWidth = Shape::kColumn / kElementsPerAccess; static int const kTargetMemoryAccessWidth = kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8); static int const kTargetAccessRows = kWarpSize / kTargetMemoryAccessWidth; }; static int const kAccessWidth = (Detail::kTargetAccessRows > Detail::kShapeRow ? kWarpSize / Detail::kShapeRow : const_min( Detail::kShapeWidth, const_min(kWarpSize, kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8)) )); static int const kAccessRows = (Detail::kTargetAccessRows > Detail::kShapeRow ? Detail::kShapeRow : const_min(Shape::kRow, kWarpSize / kAccessWidth)); static int const kIterationsRow = Detail::kShapeRow / kAccessRows; static int const kDeltaRow = kAccessRows; static int const kIterationsColumn = Detail::kShapeWidth / kAccessWidth; static int const kDeltaColumn = kAccessWidth * kElementsPerAccess; static_assert( kAccessWidth * kElementsPerAccess <= Shape::kColumn, "Accessing too many elements per access"); static_assert( kIterationsColumn > 0, "Iteration Count Column must be > 0" ); static_assert( kIterationsRow > 0, "Iteration Count Row must be > 0" ); static int const kWarpPartitionsRow = 1; static int const kWarpPartitionsColumn = 1; }; } //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 4D space across warps to achieve several performance /// objectives: /// /// - coalesced memory accesses in units of 16 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template < typename Shape_, typename Count_, int Threads, int ElementsPerAccess, int ElementSize > struct OutputTileOptimalThreadMapBiasAct { using Shape = Shape_; using Count = Count_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail { // Clusters static int const kIterationsCluster = ((Shape::kCluster > kWarpCount) ? Shape::kCluster / kWarpCount : 1); static int const kDeltaCluster = ((Shape::kCluster > kWarpCount) ? 
Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup * Shape::kCluster / kIterationsCluster : 1); static int const kCompactedDeltaCluster = ((Shape::kCluster > kWarpCount) ? Shape::kRow * Shape::kGroup * Shape::kCluster / kIterationsCluster : 1); static int const kWarpPartitionsCluster = ((Shape::kCluster > kWarpCount) ? kWarpCount : kWarpCount / Shape::kCluster); static int const kWarpsRemainingForGroups = ((Shape::kCluster > kWarpCount) ? 1 : kWarpCount / Shape::kCluster); // Groups static int const kIterationsGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kGroup / kWarpsRemainingForGroups : 1); static int const kDeltaGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kRow * Count::kRow * Shape::kGroup / kIterationsGroup : 1); static int const kCompactedDeltaGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kRow * Shape::kGroup / kIterationsGroup : 1); static int const kWarpPartitionsGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? 1 : kWarpsRemainingForGroups / Shape::kGroup); static int const kWarpsRemainingForRows = ((Shape::kGroup > kWarpsRemainingForGroups) ? 1 : kWarpsRemainingForGroups / Shape::kGroup); // Rows using RowArrangement = detail::RowArrangementBiasAct< Shape, kWarpsRemainingForRows, kElementsPerAccess, kElementSize, (Shape::kRow > kWarpsRemainingForRows) >; // Warp partitions using WarpPartitions = OutputTileShape< RowArrangement::kWarpPartitionsColumn, RowArrangement::kWarpPartitionsRow, kWarpPartitionsGroup, kWarpPartitionsCluster, 1>; static int const kAccessWidth = RowArrangement::kAccessWidth; static int const kAccessRows = RowArrangement::kAccessRows; }; // // Output // using Iterations = OutputTileShape< Detail::RowArrangement::kIterationsColumn, Detail::RowArrangement::kIterationsRow, Detail::kIterationsGroup, Detail::kIterationsCluster, 1>; using Delta = OutputTileShape< Detail::RowArrangement::kDeltaColumn, Detail::RowArrangement::kDeltaRow, Detail::kDeltaGroup, Detail::kDeltaCluster, 1>; /// Initial offset function CUTLASS_HOST_DEVICE static MatrixCoord initial_offset(int thread_idx) { int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster; int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster; int group_idx = residual_cluster / Detail::WarpPartitions::kGroup; int residual_group = residual_cluster % Detail::WarpPartitions::kGroup; int row_idx = residual_group / Detail::WarpPartitions::kRow; int col_idx = residual_group % Detail::WarpPartitions::kRow; // Compute per-lane offset int lane_row_offset = lane_idx / Detail::kAccessWidth; int lane_col_offset = lane_idx % Detail::kAccessWidth; // Compute coordinate in output space int cluster_offset = cluster_idx * Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup; int group_offset = group_idx * Shape::kRow * Count::kRow; int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows; int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess; return MatrixCoord( cluster_offset + group_offset + row_offset + lane_row_offset, (column_offset + lane_col_offset) * kElementsPerAccess ); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass
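The thread-map metaprogram above ultimately arranges each warp's 32 lanes into a kAccessRows x kAccessWidth grid and converts the lane index into a starting (row, column) inside the output tile. The following host-only sketch replays just that per-lane step for a toy configuration; kAccessWidth, kAccessRows and kElementsPerAccess below are illustrative values, not the ones the metaprogram would derive for a real epilogue.

#include <cstdio>

int main() {
  constexpr int kWarpSize = 32;
  constexpr int kAccessWidth = 8;        // lanes that cover consecutive vector accesses in one row
  constexpr int kAccessRows = kWarpSize / kAccessWidth;  // rows covered by one warp access
  constexpr int kElementsPerAccess = 4;  // elements written per vector access

  std::printf("lane arrangement: %d rows x %d lanes\n", kAccessRows, kAccessWidth);
  for (int lane = 0; lane < kWarpSize; ++lane) {
    int lane_row_offset = lane / kAccessWidth;
    int lane_col_offset = lane % kAccessWidth;
    // Starting coordinate of this lane inside the warp's portion of the output tile;
    // adjacent lanes touch adjacent vector-sized chunks, which keeps stores coalesced.
    std::printf("lane %2d -> row %d, column %2d\n",
                lane, lane_row_offset, lane_col_offset * kElementsPerAccess);
  }
  return 0;
}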
cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/output_tile_thread_map_for_fused_bias.h
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/threadblock/output_tile_thread_map_for_fused_bias.h", "repo_id": "cutlass", "token_count": 3361 }
11
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cuda_fp16.h> template <typename T> __device__ T add(T const & a, T const &b){ return (a + b); } template <> __device__ half2 add(half2 const & a, half2 const &b){ return (__hadd2(a,b)); } template <typename T> struct RELU{ __device__ T operator()(T const & a){ return a > T(0) ? a : T(0); } __device__ half2 operator()(half2 const & a){ float2 a_fp32x2 = __half22float2(a); a_fp32x2.x = a_fp32x2.x > 0.f ? a_fp32x2.x : 0.f; a_fp32x2.y = a_fp32x2.y > 0.f ? a_fp32x2.y : 0.f; if(a_fp32x2.x < 0.f || a_fp32x2.y < 0.f) printf(" %f %f\n", a_fp32x2.x ,a_fp32x2.y); return __float22half2_rn(a_fp32x2); } }; template <typename T> struct LEAKY_RELU{ __device__ T operator()(T const & a, T const & scale = half(1)){ return a > T(0) ? a : scale * a; } __device__ half2 operator()(half2 const & a, half const & scale = half(1)){ half2 zero = __half2half2(half(0)); half2 gt_zero = __hge2(a, zero); half2 le_zero = __hle2(a, zero); half2 scale_f16x2 = __half2half2(scale); half2 mask_scale_f16x2 = __hfma2(le_zero, scale_f16x2, gt_zero); return __hmul2(a, mask_scale_f16x2); } }; template <int N, int BLOCKDIM> __global__ void leaky_and_activation(half* inout, half* bias, half scale, bool mat_bias){ constexpr bool N_MOD_2 = N & 1 ? 
false : true; using Access_tp = typename std::conditional<N_MOD_2, half2, half>::type; constexpr int Access_elements = sizeof(Access_tp) / sizeof(half); constexpr int iter = (N + (BLOCKDIM * Access_elements) - 1 ) / (BLOCKDIM * Access_elements); LEAKY_RELU<half> Act; Access_tp src_v[iter]; Access_tp bias_v[iter]; int batch_id = blockIdx.y; int batch_offset = batch_id * gridDim.x * N; for(int i = 0; i < iter; i++){ int idx = (i * BLOCKDIM + threadIdx.x) * Access_elements; if (idx < N){ src_v[i] = *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset); if (mat_bias) bias_v[i] = *reinterpret_cast<Access_tp*>(bias + blockIdx.x * N + idx + batch_offset); else bias_v[i] = *reinterpret_cast<Access_tp*>(bias + idx + batch_id * N); *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset) = Act(add(src_v[i],bias_v[i]),scale); } } } template <int N, int BLOCKDIM> __global__ void leaky_and_activation(half* inout, half scale){ constexpr bool N_MOD_2 = N & 1 ? false : true; using Access_tp = typename std::conditional<N_MOD_2, half2, half>::type; constexpr int Access_elements = sizeof(Access_tp) / sizeof(half); constexpr int iter = (N + (BLOCKDIM * Access_elements) - 1 ) / (BLOCKDIM * Access_elements); int batch_id = blockIdx.y; int batch_offset = batch_id * gridDim.x * N; LEAKY_RELU<half> Act; Access_tp src_v[iter]; for(int i = 0; i < iter; i++){ int idx = (i * BLOCKDIM + threadIdx.x) * Access_elements; if (idx < N){ src_v[i] = *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset); *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset) = Act(src_v[i], scale); } } } template <int N, int BLOCKDIM> void leaky_and_activation(half* inout, half* bias, int m, int b, half scale, bool mat_bias){ dim3 grid(m, b); if (bias == nullptr) leaky_and_activation<N, BLOCKDIM><<<grid , BLOCKDIM>>>(inout, scale); else leaky_and_activation<N, BLOCKDIM><<<grid , BLOCKDIM>>>(inout, bias, scale, mat_bias); } template <int N, int BLOCKDIM> __global__ void relu_and_activation(half* inout, half* bias, bool mat_bias){ constexpr bool N_MOD_2 = N & 1 ? false : true; using Access_tp = typename std::conditional<N_MOD_2, half2, half>::type; constexpr int Access_elements = sizeof(Access_tp) / sizeof(half); constexpr int iter = (N + (BLOCKDIM * Access_elements) - 1 ) / (BLOCKDIM * Access_elements); RELU<half> Act; Access_tp src_v[iter]; Access_tp bias_v[iter]; int batch_id = blockIdx.y; int batch_offset = batch_id * gridDim.x * N; for(int i = 0; i < iter; i++){ int idx = (i * BLOCKDIM + threadIdx.x) * Access_elements; if (idx < N){ src_v[i] = *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset); if (mat_bias) bias_v[i] = *reinterpret_cast<Access_tp*>(bias + blockIdx.x * N + idx + batch_offset); else bias_v[i] = *reinterpret_cast<Access_tp*>(bias + idx + batch_id * N); *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset) = Act(add(src_v[i],bias_v[i])); } } } template <int N, int BLOCKDIM> __global__ void relu_and_activation(half* inout){ constexpr bool N_MOD_2 = N & 1 ? 
false : true; using Access_tp = typename std::conditional<N_MOD_2, half2, half>::type; constexpr int Access_elements = sizeof(Access_tp) / sizeof(half); constexpr int iter = (N + (BLOCKDIM * Access_elements) - 1 ) / (BLOCKDIM * Access_elements); int batch_id = blockIdx.y; int batch_offset = batch_id * gridDim.x * N; RELU<half> Act; Access_tp src_v[iter]; for(int i = 0; i < iter; i++){ int idx = (i * BLOCKDIM + threadIdx.x) * Access_elements; if (idx < N){ src_v[i] = *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset); *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset) = Act(src_v[i]); } } } template <int N, int BLOCKDIM> void relu_and_activation(half* inout, half* bias, int m, int b, bool mat_bias){ dim3 grid(m, b); if (bias == nullptr) relu_and_activation<N, BLOCKDIM><<<grid , BLOCKDIM>>>(inout); else relu_and_activation<N, BLOCKDIM><<<grid , BLOCKDIM>>>(inout, bias, mat_bias); } template <int N, int BLOCKDIM> __global__ void identity_and_activation(half* inout, half* bias, bool mat_bias){ constexpr bool N_MOD_2 = N & 1 ? false : true; using Access_tp = typename std::conditional<N_MOD_2, half2, half>::type; constexpr int Access_elements = sizeof(Access_tp) / sizeof(half); constexpr int iter = (N + (BLOCKDIM * Access_elements) - 1 ) / (BLOCKDIM * Access_elements); int batch_id = blockIdx.y; int batch_offset = batch_id * gridDim.x * N; Access_tp src_v[iter]; Access_tp bias_v[iter]; for(int i = 0; i < iter; i++){ int idx = (i * BLOCKDIM + threadIdx.x) * Access_elements; if (idx < N){ src_v[i] = *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset); if (mat_bias) bias_v[i] = *reinterpret_cast<Access_tp*>(bias + blockIdx.x * N + idx + batch_offset); else bias_v[i] = *reinterpret_cast<Access_tp*>(bias + idx + batch_id * N); *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset) = (add(src_v[i],bias_v[i])); } } } template <int N, int BLOCKDIM> __global__ void identity_and_activation(half* inout){ constexpr bool N_MOD_2 = N & 1 ? false : true; using Access_tp = typename std::conditional<N_MOD_2, half2, half>::type; constexpr int Access_elements = sizeof(Access_tp) / sizeof(half); constexpr int iter = (N + (BLOCKDIM * Access_elements) - 1 ) / (BLOCKDIM * Access_elements); int batch_id = blockIdx.y; int batch_offset = batch_id * gridDim.x * N; Access_tp src_v[iter]; for(int i = 0; i < iter; i++){ int idx = (i * BLOCKDIM + threadIdx.x) * Access_elements; if (idx < N){ src_v[i] = *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset); *reinterpret_cast<Access_tp*>(inout + blockIdx.x * N + idx + batch_offset) = (src_v[i]); } } } template <int N, int BLOCKDIM> void identity_and_activation(half* inout, half* bias, int m, int b, bool mat_bias){ dim3 grid(m, b); if (bias == nullptr) identity_and_activation<N, BLOCKDIM><<<grid , BLOCKDIM>>>(inout); else identity_and_activation<N, BLOCKDIM><<<grid , BLOCKDIM>>>(inout, bias, mat_bias); }
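A usage sketch for the host-side launchers defined above. It assumes the header is included in a .cu file compiled with nvcc and that d_output and d_bias are valid device allocations; the sizes, slope, and bias mode are illustrative. The output tensor is laid out as b batches of an m x N row-major matrix; the bias is either a full b x m x N tensor (mat_bias == true) or a per-batch vector of length N broadcast across rows (mat_bias == false).

#include <cuda_fp16.h>
// #include "leaky_bias.h"   // the header shown above

void apply_epilogue_sketch(half* d_output, half* d_bias, int m, int batches) {
  constexpr int N = 256;        // number of output columns; must match the template parameter
  constexpr int BLOCKDIM = 128; // threads per block

  // Leaky ReLU with slope 0.1, adding a per-batch bias vector of length N
  leaky_and_activation<N, BLOCKDIM>(d_output, d_bias, m, batches,
                                    half(0.1f), /*mat_bias=*/false);

  // Plain ReLU with no bias at all: passing nullptr selects the bias-free kernel
  relu_and_activation<N, BLOCKDIM>(d_output, /*bias=*/nullptr, m, batches, /*mat_bias=*/false);

  // (The two calls are independent illustrations of the two entry points, not a pipeline.)
}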
cutlass/examples/44_multi_gemm_ir_and_codegen/leaky_bias.h
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/leaky_bias.h", "repo_id": "cutlass", "token_count": 4407 }
12
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*************************************************************************************************** Example contrasting the Stream-K parallel decomposition for GEMM threadblocks versus the "classic data-parallel" and "Split-K" decompositions. For more details regarding the Stream-K method, see "Stream-K: Work-centric Parallel Decomposition for Dense Matrix-Matrix Multiplication on the GPU" (https://arxiv.org/abs/2301.03598) Requires NVIDIA Ampere or newer device (SM80+). - To lock persistence mode, power (400W), clocks (1005MHz) for evaluation (assumes device 0 and A100) cutlass$ sudo nvidia-smi -pm 1 -i 0 cutlass$ sudo nvidia-smi -i 0 -pl 400 cutlass$ sudo nvidia-smi -i 0 -lgc 1005 - Build and run: cutlass$ mkdir build cutlass$ cd build cutlass/build$ cmake .. 
-DCUTLASS_NVCC_ARCHS=80 cutlass/build$ make 47_ampere_gemm_universal_streamk cutlass/build$ ./examples/47_ampere_gemm_universal_streamk/47_ampere_gemm_universal_streamk 10000 timing iterations of 2048 x 2048 x 2048 matrix-matrix multiply Basic data-parallel GEMM Disposition: Passed Avg runtime: 0.112633 ms GFLOPs: 152530 StreamK GEMM with default load-balancing Disposition: Passed Avg runtime: 0.0941929 ms GFLOPs: 182390 Speedup vs Basic-DP: 1.196 StreamK emulating basic data-parallel GEMM Disposition: Passed Avg runtime: 0.113119 ms GFLOPs: 151875 Speedup vs Basic-DP: 0.996 Basic split-K GEMM with tile-splitting factor 2 Disposition: Passed Avg runtime: 0.104772 ms GFLOPs: 163973 StreamK emulating Split-K GEMM with tile-splitting factor 2 Disposition: Passed Avg runtime: 0.105379 ms GFLOPs: 163029 Speedup vs Basic-SplitK: 0.994 **************************************************************************************************/ #include <iostream> #include <string> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_universal.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM kernel configurations (cutlass_tensorop_h16816gemm_128x128_32x4_nn_align8) ///////////////////////////////////////////////////////////////////////////////////////////////// // A matrix configuration using ElementA = cutlass::half_t; // Element type for A matrix operand using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes) // B matrix configuration using ElementB = cutlass::half_t; // Element type for B matrix operand using LayoutB = cutlass::layout::RowMajor; // Layout type for B matrix operand constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes) // C/D matrix configuration using ElementC = cutlass::half_t; // Element type for C and D matrix operands using LayoutC = cutlass::layout::RowMajor; // Layout type for C and D matrix operands constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C/D matrices in units of elements (up to 16 bytes) // Multiply-accumulate blocking/pipelining details using ElementAccumulator = cutlass::half_t; // Element type for internal accumulation using ArchTag = cutlass::arch::Sm80; // Tag indicating the minimum SM that supports the intended feature using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock-level tile size (concept: GemmShape) using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp-level tile size (concept: GemmShape) using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // Instruction-level tile size (concept: GemmShape) constexpr int NumStages = 4; // Number of global->shared pipeline stages used in the GEMM mainloop // Epilogue output operator using EpilogueOp = 
cutlass::epilogue::thread::LinearCombination< ElementC, // Element type for C and D matrix operands AlignmentC, // Memory access granularity of C and D matrix in units of elements ElementAccumulator, // Element type from internal accumaccumulation ElementAccumulator>; // Data type used to compute linear combination // Reference device GEMM implementation type using DeviceGemmReference = cutlass::reference::device::Gemm< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, ElementAccumulator>; // Classic data-parallel device GEMM implementation type using DeviceGemmBasic = cutlass::gemm::device::GemmUniversal< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, NumStages, AlignmentA, AlignmentB>; // StreamK device GEMM implementation type using DeviceGemmStreamK = cutlass::gemm::device::GemmUniversal< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::ThreadblockSwizzleStreamK, // <-- Only difference NumStages, AlignmentA, AlignmentB>; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Testbed utility types ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double avg_runtime_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; Result( double avg_runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess) : avg_runtime_ms(avg_runtime_ms), gflops(gflops), status(status), error(error), passed(true) {} }; /// Command line options parsing struct Options { std::string command_name; bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; int split_k_factor; int avail_sms; bool reference_check; int iterations; cutlass::HostTensor<ElementA, LayoutA> tensor_a; cutlass::HostTensor<ElementB, LayoutB> tensor_b; cutlass::HostTensor<ElementC, LayoutC> tensor_c; cutlass::HostTensor<ElementC, LayoutC> tensor_d; cutlass::HostTensor<ElementC, LayoutC> tensor_ref_d; Options(std::string command_name) : command_name(command_name), help(false), problem_size({2048, 2048, 2048}), alpha(1.0f), beta(0.0f), split_k_factor(1), avail_sms(-1), // Number of device SMs to use is unlimited reference_check(true), iterations(10000) {} bool valid() const { return true; } void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("split", split_k_factor); cmd.get_cmd_line_argument("iterations", iterations); } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "Performs a GEMM computation.\n" << "\n" << "Options:\n" << "\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --split=<int> Split-K factor to emulate\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n"; out << "\n\nExamples:\n\n" << "$ " << command_name << " --m=1024 --n=512 --k=1024 --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Two flops per multiply-add return 2.0 * double(problem_size.product()) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM evaluation ///////////////////////////////////////////////////////////////////////////////////////////////// /// Populates a DeviceGemmBasic::Arguments structure from the given commandline options typename DeviceGemmBasic::Arguments args_from_options( const DeviceGemmBasic &device_gemm, const Options &options, cutlass::HostTensor<ElementA, LayoutA> &tensor_a, cutlass::HostTensor<ElementB, LayoutB> &tensor_b, cutlass::HostTensor<ElementC, LayoutC> &tensor_c, cutlass::HostTensor<ElementC, LayoutC> &tensor_d) { return typename DeviceGemmBasic::Arguments( cutlass::gemm::GemmUniversalMode::kGemm, // universal mode options.problem_size, // problem_size options.split_k_factor, // batch count / splitk slices { // epilogue parameters ElementAccumulator(options.alpha), ElementAccumulator(options.beta) }, tensor_a.device_data(), // ptr_A tensor_b.device_data(), // ptr_B tensor_c.device_data(), // ptr_C tensor_d.device_data(), // ptr_D options.problem_size.mk().product(), // batch_stride_A options.problem_size.nk().product(), // batch_stride_B options.problem_size.mn().product(), // batch_stride_C options.problem_size.mn().product(), // batch_stride_D tensor_a.layout().stride(0), // stride_a tensor_b.layout().stride(0), // stride_b tensor_c.layout().stride(0), // stride_c tensor_d.layout().stride(0)); // stride_d } /// Populates a DeviceGemmStreamK::Arguments structure from the given commandline options typename DeviceGemmStreamK::Arguments args_from_options( const DeviceGemmStreamK &device_gemm, const Options &options, cutlass::HostTensor<ElementA, LayoutA> &tensor_a, cutlass::HostTensor<ElementB, LayoutB> &tensor_b, cutlass::HostTensor<ElementC, LayoutC> &tensor_c, cutlass::HostTensor<ElementC, LayoutC> &tensor_d) { return typename DeviceGemmStreamK::Arguments( cutlass::gemm::GemmUniversalMode::kGemm, // universal mode options.problem_size, // problem_size options.split_k_factor, // batch count / splitk slices { // epilogue parameters ElementAccumulator(options.alpha), ElementAccumulator(options.beta) }, tensor_a.device_data(), // ptr_A tensor_b.device_data(), // ptr_B tensor_c.device_data(), // ptr_C tensor_d.device_data(), // ptr_D options.problem_size.mk().product(), // batch_stride_A options.problem_size.nk().product(), // batch_stride_B options.problem_size.mn().product(), // batch_stride_C options.problem_size.mn().product(), // batch_stride_D tensor_a.layout().stride(0), // stride_a tensor_b.layout().stride(0), // stride_b tensor_c.layout().stride(0), // stride_c tensor_d.layout().stride(0), // stride_d options.avail_sms); // avail_sms } /// Execute a given example GEMM computation template 
<typename DeviceGemmT> Result run(std::string description, Options &options) { // Display test description std::cout << std::endl << description << std::endl; // Zero-initialize test output matrix D cutlass::reference::host::TensorFill(options.tensor_d.host_view()); options.tensor_d.sync_device(); // Instantiate CUTLASS kernel depending on templates DeviceGemmT device_gemm; // Create a structure of gemm kernel arguments suitable for invoking an instance of DeviceGemmT auto arguments = args_from_options(device_gemm, options, options.tensor_a, options.tensor_b, options.tensor_c, options.tensor_d); // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = DeviceGemmT::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not CUTLASS_CHECK(device_gemm.can_implement(arguments)); // Initialize CUTLASS kernel with arguments and workspace pointer CUTLASS_CHECK(device_gemm.initialize(arguments, workspace.get())); // Correctness / Warmup iteration CUTLASS_CHECK(device_gemm()); // Copy output data from CUTLASS and reference kernel to host for comparison options.tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not Result result; result.passed = cutlass::reference::host::TensorEquals( options.tensor_d.host_view(), options.tensor_ref_d.host_view()); std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl; // Run profiling loop if (options.iterations > 0) { GpuTimer timer; timer.start(); for (int iter = 0; iter < options.iterations; ++iter) { CUTLASS_CHECK(device_gemm()); } timer.stop(); // Compute average runtime and GFLOPs. float elapsed_ms = timer.elapsed_millis(); result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations); result.gflops = options.gflops(result.avg_runtime_ms / 1000.0); std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl; std::cout << " GFLOPs: " << result.gflops << std::endl; } if (!result.passed) { exit(-1); } return result; } /// Program entrypoint int main(int argc, const char **argv) { // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } // Current device must must have compute capability at least 80 cudaDeviceProp props; int current_device_id; CUDA_CHECK(cudaGetDevice(&current_device_id)); CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id)); if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } // Parse commandline options Options options("ampere_streamk_gemm"); options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } std::cout << options.iterations << " timing iterations of " << options.problem_size.m() << " x " << options.problem_size.n() << " x " << options.problem_size.k() << " matrix-matrix multiply" << std::endl; if (!options.valid()) { std::cerr << "Invalid problem." 
<< std::endl; return -1; } // // Initialize GEMM datasets // // Initialize tensors using CUTLASS helper functions options.tensor_a.resize(options.problem_size.mk()); // <- Create matrix A with dimensions M x K options.tensor_b.resize(options.problem_size.kn()); // <- Create matrix B with dimensions K x N options.tensor_c.resize(options.problem_size.mn()); // <- Create matrix C with dimensions M x N options.tensor_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from CUTLASS kernel options.tensor_ref_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from reference kernel // Fill matrix A on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_a.host_view(), 1, ElementA(2), ElementA(-2), 0); // Fill matrix B on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_b.host_view(), 1, ElementB(2), ElementB(-2), 0); // Fill matrix C on host with uniform-random data [-2, 2] cutlass::reference::host::TensorFillRandomUniform( options.tensor_c.host_view(), 1, ElementC(2), ElementC(-2), 0); // // Compute reference output // // Copy data from host to GPU options.tensor_a.sync_device(); options.tensor_b.sync_device(); options.tensor_c.sync_device(); // Zero-initialize reference output matrix D cutlass::reference::host::TensorFill(options.tensor_ref_d.host_view()); options.tensor_ref_d.sync_device(); // Create instantiation for device reference gemm kernel DeviceGemmReference gemm_reference; // Launch device reference gemm kernel gemm_reference( options.problem_size, ElementAccumulator(options.alpha), options.tensor_a.device_ref(), options.tensor_b.device_ref(), ElementAccumulator(options.beta), options.tensor_c.device_ref(), options.tensor_ref_d.device_ref()); // Wait for kernels to finish CUDA_CHECK(cudaDeviceSynchronize()); // Copy output data from reference kernel to host for comparison options.tensor_ref_d.sync_host(); // // Evaluate CUTLASS kernels // // Test default operation if (options.split_k_factor == 1) { // Compare basic data-parallel version versus StreamK version using default load-balancing heuristics Result basic_dp = run<DeviceGemmBasic>("Basic data-parallel GEMM", options); Result streamk_default = run<DeviceGemmStreamK>("StreamK GEMM with default load-balancing", options); printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_default.avg_runtime_ms)); // Show that StreamK can emulate basic data-parallel GEMM when we set the number of SMs to load-balance across = 1 options.avail_sms = 1; // Set loadbalancing width to 1 SM (no load balancing) Result streamk_dp = run<DeviceGemmStreamK>("StreamK emulating basic data-parallel GEMM", options); options.avail_sms = -1; // Reset loadbalancing width to unspecified SMs (i.e., the number of device SMs) printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_dp.avg_runtime_ms)); options.split_k_factor++; // Increment splitting factor for next evaluation } // Show that StreamK can emulate "Split-K" with a tile-splitting factor Result basic_splitk = run<DeviceGemmBasic>( std::string("Basic split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor), options); Result streamk_splitk = run<DeviceGemmStreamK>( std::string("StreamK emulating Split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor), options); printf(" Speedup vs Basic-SplitK: %.3f\n", 
(basic_splitk.avg_runtime_ms / streamk_splitk.avg_runtime_ms)); return 0; }
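A back-of-the-envelope sketch of the load-balancing argument this example benchmarks. It is host-only arithmetic with no CUTLASS dependency; the 2048^3 problem and 128x128x32 tile match the defaults above, while the 108-SM count is an assumption corresponding to an A100 and would differ on other devices.

#include <cstdio>

int main() {
  const int M = 2048, N = 2048, K = 2048;          // default problem size in this example
  const int TileM = 128, TileN = 128, TileK = 32;  // ThreadblockShape used above
  const int sms = 108;                             // assumed SM count (A100)

  int output_tiles = (M / TileM) * (N / TileN);    // 16 * 16 = 256 output tiles
  int k_iters_per_tile = K / TileK;                // 64 MAC iterations per tile

  // Data-parallel decomposition: whole tiles are issued in waves of `sms`;
  // the final partial wave leaves most SMs idle ("wave quantization").
  int full_waves = output_tiles / sms;             // 2 full waves
  int tail_tiles = output_tiles % sms;             // 40 tiles in the tail wave
  std::printf("data-parallel: %d full waves, tail wave keeps %d of %d SMs busy\n",
              full_waves, tail_tiles, sms);

  // Stream-K decomposition: the total MAC-iteration workload is divided evenly
  // across SMs, so there is no quantized tail wave to amortize.
  long long total_iters = 1LL * output_tiles * k_iters_per_tile;
  std::printf("stream-k: roughly %lld MAC iterations per SM\n", total_iters / sms);
  return 0;
}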
cutlass/examples/47_ampere_gemm_universal_streamk/ampere_gemm_universal_streamk.cu
{ "file_path": "cutlass/examples/47_ampere_gemm_universal_streamk/ampere_gemm_universal_streamk.cu", "repo_id": "cutlass", "token_count": 8563 }
13
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Hopper GEMM+permute example. This example demonstrates the fusion of tensor permutation operations with a Hopper GEMM kernel. It is similar in spirit to example 39_gemm_permute, but uses CUTLASS 3 CollectiveBuilder API to construct kernels that make use of Hopper architecture features: Tensor Memory Accelerator (TMA) units and warpgroup-level MMA instructions. Background ---------- While a GEMM kernel computes a product of two matrices (rank-2 tensors), the source data may come from higher-rank tensors by combining some if its modes (dimensions) into the row and column modes of the matrix. These tensors are often outputs from previous layers of a network, and the data may sometimes need to be reordered in memory before a GEMM is computed. Similarly, the output of a GEMM may need to be reordered before a subsequent operation can be executed. Consider this sample PyTorch code: # Forward pass D = torch.mm(A, B).view(M/D1, D1, D2, N/D2).permute(0, 2, 1, 3) # Backward pass grad_A = torch.mm(grad_D.permute(0, 2, 1, 3).view(M, N), B) Executing the reordering as a separate operation requires committing intermediate tensor to memory and increases the latency and memory footprint of the model. By fusing the permutation with either reading of A/B matrices or writing of D matrix, we can avoid the unnecessary global memory traffic and kernel launch overhead. Implementation -------------- The approach relies on two things: - The ability of CUTLASS 3 to naturally perform general tensor contractions (GETT) owing to the flexibility of CuTe's hierarchical layouts (see example 51_hopper_gett for more details). 
- The harware capabilities of Hopper TMA units that allow for loading multidimensional tensors with (almost) arbitrary strides, which can be used to represent a permuted view of the data. In this example we reuse the permutation classes of examples 39_gemm_permute as operation tags. For each tag, a specialization of struct PermuteTraits<> provides the necessary information about the target tensor shape and ordering of modes. The main class, ExampleRunner, then figures out the overall (hierarchical) shape of the GEMM operation and computes the shape and strides for each tensor taking into account the permutation applied. We highlight the importance of specifying consistent multidimensional shapes for all tensors (even those that are not permuted), as well as choosing hierarchical GEMM tile sizes that best fit those shapes (in cases where some tensor dimensions are known at compile time). In addition, this example implements a standalone permutation kernel that is used to both verify correctness of the fused kernel and benchmark the fused kernel against an unfused version that writes intermediate tensor to memory. */ #include "cutlass/arch/arch.h" #include "cutlass/arch/mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/permute.h" #include "cutlass/util/command_line.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/packed_stride.hpp" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/collective_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "helper.h" #include "permute_kernel.cuh" #include "permute_traits.hpp" namespace example { #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) struct Options { bool help; cutlass::gemm::BatchedGemmCoord problem_size; float alpha; float beta; bool reference_check; int iterations; bool verbose; Options(): help(false), problem_size({2048, 2048, 2048, 8}), alpha(1.0), beta(1.0), reference_check(true), iterations(20), verbose(false) { } bool valid() const { return problem_size.m() > 0 && problem_size.n() > 0 && problem_size.k() > 0 && problem_size.batch() > 0 && iterations > 0; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("batch_size", problem_size.batch()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("check", reference_check, true); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("verbose", verbose, false); } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "53_hopper_gemm_permute example\n" "\n" " This example uses the CUTLASS Library to fuse permute() on input/output tensors with GEMM\n" "\n" "Options:\n" " --help If specified, displays this usage statement.\n" " --m=<int> GEMM M dimension\n" " --n=<int> GEMM N dimension\n" " --k=<int> GEMM K dimension\n" " --alpha=<float> GEMM alpha parameter\n" " --beta=<float> GEMM beta parameter\n" " --iterations=<int> Number of profiling iterations to perform.\n" " --check=<bool> Validate results against a reference (unfused) implementation\n" " --verbose=<bool> Enable verbose output\n" "\n" "Examples:\n" "\n" "$ ./examples/53_hopper_gemm_permute/53_hopper_gemm_permute --m=4096 --n=2048 --k=3072 --batch_size=8\n"; return out; } }; using namespace cute; // Check the shapes assigned to the same mode of different tensors, // ensure all permuted shapes are the same and return that shape. template<class ... Shapes> auto select_mode_shape(Shapes const & ... shapes) { auto permuted_shapes = filter_tuple(cute::make_tuple(shapes...), [](auto shape) { if constexpr (cute::rank(shape) > 1) { return cute::make_tuple(shape); } else { return cute::make_tuple(); } }); if constexpr (cute::rank(permuted_shapes) == 0) { return get<0>(cute::make_tuple(shapes...)); } else { auto ref_shape = get<0>(permuted_shapes); for_each(permuted_shapes, [&](auto shape) { // This static assert fails to compile on GCC 7.5 // static_assert(is_same<decltype(shape), decltype(ref_shape)>::value, "Inconsistent shapes for the same mode"); // This runtime check can be skipped if all permutations are required to be static. if (shape != ref_shape) { print("Inconsistent shapes for the same mode: "); print(ref_shape); print(" and "); print(shape); print("\n"); exit(EXIT_FAILURE); } }); return ref_shape; } } template<class Shape, class StrideOrig> auto compute_default_stride(Shape const & shape, StrideOrig const & stride_orig) { // Only supports column-major and row-major, batch stride always comes last if constexpr (is_constant<1, decltype(get<0>(stride_orig))>::value) { return compact_col_major(shape); } else { return compact_order(shape, Step<_1,_0,_2>{}); } } // Divide a static scalar TileSize into static modes of Shape until either: // - a dynamic mode is encountered // - we run out of size to divide // - no longer divisible by next shape // Examples: // select_tile_shape(_128, (_8,_16)) -> (_8,_16) // select_tile_shape(_128, (_8,_32)) -> (_8,_16) // select_tile_shape(_128, (_8, _4)) -> (_8,_4,_4) // select_tile_shape(_128, (_8, 4)) -> (_8,_16) template<class TileSize, class Shape> auto select_tile_shape(TileSize size, Shape const& shape) { static_assert(is_static<TileSize>::value, "Tile size must be static"); if constexpr (cute::rank(Shape{}) == 0) { return cute::make_tuple(size); } else { if constexpr (is_static<tuple_element_t<0, Shape>>::value) { auto div = front(shape); if constexpr (size > div and size % div == 0) { return prepend(select_tile_shape(size / div, take<1,tuple_size_v<Shape>>(shape)), div); } else { return cute::make_tuple(size); } } else { return cute::make_tuple(size); } } } template<class ElementA, class LayoutA, class PermuteA, class ElementB, class LayoutB, class PermuteB, class ElementC, class LayoutC, class PermuteC, class ElementD, class LayoutD, class PermuteD, class ElementAccumulator, class ElementEpilogue, class TileShape, class ClusterShape> class ExampleRunner { private: // Define shapes for each operand and original GEMM problem as a whole.
using MatrixShape = Shape<int,int,int>; // [M,N,L]/[M,K,L]/[N,K,L] using ProblemShape = Shape<int,int,int,int>; // [M,N,K,L] // Determine the CuTe stride for each of the four operands. using StrideA = cutlass::gemm::TagToStrideA_t<LayoutA>; using StrideB = cutlass::gemm::TagToStrideB_t<LayoutB>; using StrideC = cutlass::gemm::TagToStrideC_t<LayoutC>; using StrideD = cutlass::gemm::TagToStrideC_t<LayoutD>; // Flags to check which operands will be permuted. static constexpr bool DoPermuteA = not cutlass::layout::is_trivial_permute<PermuteA>; static constexpr bool DoPermuteB = not cutlass::layout::is_trivial_permute<PermuteB>; static constexpr bool DoPermuteC = not cutlass::layout::is_trivial_permute<PermuteC>; static constexpr bool DoPermuteD = not cutlass::layout::is_trivial_permute<PermuteD>; // For input operands, we must use inverse of the permutation operation // to read data that is stored in original (un-permuted) order. using PermuteAReal = typename cutlass::layout::InversePermute<PermuteA>::type; using PermuteBReal = typename cutlass::layout::InversePermute<PermuteB>::type; using PermuteCReal = typename cutlass::layout::InversePermute<PermuteC>::type; using PermuteDReal = PermuteD; // Get permutation layout for each operand. // A permutation layout is a rank-3 layout in the usual CuTe mode ordering, // but each mode may have a nested shape corresponding to the reshaping of // the matrix into a multidimensional tensor, and the strides are computed // taking the desired permutation into account. template<class Permute, class Stride, bool Transpose> using LayoutPermute = remove_cvref_t<decltype(make_permute_layout<Permute, Transpose>(make_layout(MatrixShape{}, Stride{})))>; using LayoutAP = LayoutPermute<PermuteAReal, StrideA, false>; using LayoutBP = LayoutPermute<PermuteBReal, StrideB, true >; using LayoutCP = LayoutPermute<PermuteCReal, StrideC, false>; using LayoutDP = LayoutPermute<PermuteDReal, StrideD, false>; // Now we want to build the unified problem shape for permute-GEMM. // To do this, we check the corresponding mode in each tensor that has it. // If at least one tensor has a mode that has been reshaped (i.e. rank > 1), // its shape will be used as the reference shape for that mode in all tensors. // If multiple tensors have reshaped mode, we additionally check that their // shapes for that mode match. Otherwise, we can't define a consistent GEMM shape. using ShapeM = decltype(select_mode_shape(shape<0>(LayoutAP{}), shape<0>(LayoutCP{}), shape<0>(LayoutDP{}))); using ShapeN = decltype(select_mode_shape(shape<0>(LayoutBP{}), shape<1>(LayoutCP{}), shape<1>(LayoutDP{}))); using ShapeK = decltype(select_mode_shape(shape<1>(LayoutAP{}), shape<1>(LayoutBP{}))); using ShapeL = decltype(select_mode_shape(shape<2>(LayoutAP{}), shape<2>(LayoutBP{}), shape<2>(LayoutCP{}), shape<2>(LayoutDP{}))); using ProblemShapePermute = Shape<ShapeM, ShapeN, ShapeK, ShapeL>; using ShapeAPermute = Shape<ShapeM, ShapeK, ShapeL>; using ShapeBPermute = Shape<ShapeN, ShapeK, ShapeL>; using ShapeCPermute = Shape<ShapeM, ShapeN, ShapeL>; using ShapeDPermute = Shape<ShapeM, ShapeN, ShapeL>; // Next, we must define the strides for each tensor. // If the tensor is permuted, we take the strides produced by the permutation function. // Otherwise, we compute default strides induced by the new (multidimensional) shape of the tensor. // // This won't always work in general if multiple tensors are permuted: e.g. 
if PermuteA affects // modes M and K, and PermuteB affects modes N and L, the single stride for mode L of tensor A // computed by PermuteA will be non-congruent with its shape, which is changed by PermuteB. // To handle this correctly, more complicated logic is needed to reconstruct multi-mode strides. // This is not addressed here, as it's not a common requirement to permute multiple tensors in one GEMM. using StrideAPermute = conditional_t<DoPermuteA, remove_cvref_t<decltype(stride(LayoutAP{}))>, decltype(compute_default_stride(ShapeAPermute{}, StrideA{}))>; using StrideBPermute = conditional_t<DoPermuteB, remove_cvref_t<decltype(stride(LayoutBP{}))>, decltype(compute_default_stride(ShapeBPermute{}, StrideB{}))>; using StrideCPermute = conditional_t<DoPermuteC, remove_cvref_t<decltype(stride(LayoutCP{}))>, decltype(compute_default_stride(ShapeCPermute{}, StrideC{}))>; using StrideDPermute = conditional_t<DoPermuteD, remove_cvref_t<decltype(stride(LayoutDP{}))>, decltype(compute_default_stride(ShapeDPermute{}, StrideD{}))>; // We need to select an optimal tile shape based on the tile size specified by the user. // This is done by dividing the tile size in each mode by the mode shape as much // as possible (i.e. until we run out of tile size or encounter a dynamic sub-shape). using TileMPermute = decltype(select_tile_shape(get<0>(TileShape{}), ShapeM{})); using TileNPermute = decltype(select_tile_shape(get<1>(TileShape{}), ShapeN{})); using TileKPermute = decltype(select_tile_shape(get<2>(TileShape{}), ShapeK{})); using TileShapePermute = Shape<TileMPermute, TileNPermute, TileKPermute>; // Now we are ready to define the GEMM kernel types for both fused permute and reference paths. using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape, ClusterShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementAccumulator, ElementEpilogue, ElementC, StrideC, 128 / cutlass::sizeof_bits<ElementC>::value, ElementD, StrideD, 128 / cutlass::sizeof_bits<ElementD>::value, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using CollectiveEpiloguePermute = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShapePermute, ClusterShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementAccumulator, ElementEpilogue, ElementC, StrideCPermute, 128 / cutlass::sizeof_bits<ElementC>::value, ElementD, StrideDPermute, 128 / cutlass::sizeof_bits<ElementD>::value, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, StrideA, 128 / cutlass::sizeof_bits<ElementA>::value, ElementB, StrideB, 128 / cutlass::sizeof_bits<ElementB>::value, ElementAccumulator, TileShape, ClusterShape, cutlass::gemm::collective::StageCountAutoCarveout< static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; using CollectiveMainloopPermute = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, StrideAPermute, 128 / cutlass::sizeof_bits<ElementA>::value, ElementB, StrideBPermute, 128 / cutlass::sizeof_bits<ElementB>::value, ElementAccumulator, TileShapePermute, ClusterShape, cutlass::gemm::collective::StageCountAutoCarveout< static_cast<int>(sizeof(typename
CollectiveEpiloguePermute::SharedStorage))>, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< ProblemShape, CollectiveMainloop, CollectiveEpilogue >; using GemmKernelPermute = cutlass::gemm::kernel::GemmUniversal< ProblemShapePermute, CollectiveMainloopPermute, CollectiveEpiloguePermute >; using GemmReference = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; using GemmPermute = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelPermute>; // Data members cutlass::gemm::BatchedGemmCoord problem_size; ProblemShape problem_shape; cutlass::KernelHardwareInfo hw_info; ElementEpilogue alpha; ElementEpilogue beta; MatrixShape shape_A; MatrixShape shape_B; MatrixShape shape_C; MatrixShape shape_D; StrideA stride_A; StrideB stride_B; StrideC stride_C; StrideD stride_D; LayoutAP layout_AP; LayoutBP layout_BP; LayoutCP layout_CP; LayoutDP layout_DP; ShapeM shape_M; ShapeN shape_N; ShapeK shape_K; ShapeL shape_L; ProblemShapePermute problem_shape_permute; ShapeAPermute shape_A_permute; ShapeBPermute shape_B_permute; ShapeCPermute shape_C_permute; ShapeDPermute shape_D_permute; StrideAPermute stride_A_permute; StrideBPermute stride_B_permute; StrideCPermute stride_C_permute; StrideDPermute stride_D_permute; cutlass::device_memory::allocation<ElementA> tensor_a; cutlass::device_memory::allocation<ElementB> tensor_b; cutlass::device_memory::allocation<ElementC> tensor_c; cutlass::device_memory::allocation<ElementD> tensor_d; cutlass::device_memory::allocation<ElementA> tensor_a_permuted; cutlass::device_memory::allocation<ElementB> tensor_b_permuted; cutlass::device_memory::allocation<ElementC> tensor_c_permuted; cutlass::device_memory::allocation<ElementD> tensor_d_unpermuted; cutlass::device_memory::allocation<ElementD> tensor_d_reference; cutlass::gemm::GemmUniversalMode gemm_mode; GemmPermute gemm_permute; typename GemmPermute::Arguments arguments_permute; cutlass::device_memory::allocation<uint8_t> workspace_permute; GemmReference gemm_reference; typename GemmReference::Arguments arguments_reference; cutlass::device_memory::allocation<uint8_t> workspace_reference; public: ExampleRunner(Options const & options, cutlass::KernelHardwareInfo const & hw_info) : problem_size(options.problem_size), problem_shape(problem_size.m(), problem_size.n(), problem_size.k(), problem_size.batch()), hw_info(hw_info), alpha(options.alpha), beta(options.beta), shape_A(make_shape(problem_size.m(), problem_size.k(), problem_size.batch())), shape_B(make_shape(problem_size.n(), problem_size.k(), problem_size.batch())), shape_C(make_shape(problem_size.m(), problem_size.n(), problem_size.batch())), shape_D(make_shape(problem_size.m(), problem_size.n(), problem_size.batch())), stride_A(cutlass::make_cute_packed_stride(StrideA{}, shape_A)), stride_B(cutlass::make_cute_packed_stride(StrideB{}, shape_B)), stride_C(cutlass::make_cute_packed_stride(StrideC{}, shape_C)), stride_D(cutlass::make_cute_packed_stride(StrideD{}, shape_D)), layout_AP(make_permute_layout<PermuteAReal, false>(make_layout(shape_A, stride_A))), layout_BP(make_permute_layout<PermuteBReal, true >(make_layout(shape_B, stride_B))), layout_CP(make_permute_layout<PermuteCReal, false>(make_layout(shape_C, stride_C))), layout_DP(make_permute_layout<PermuteDReal, false>(make_layout(shape_D, stride_D))), shape_M(select_mode_shape(shape<0>(layout_AP), shape<0>(layout_CP), shape<0>(layout_DP))), shape_N(select_mode_shape(shape<0>(layout_BP), shape<1>(layout_CP), shape<1>(layout_DP))), 
shape_K(select_mode_shape(shape<1>(layout_AP), shape<1>(layout_BP))), shape_L(select_mode_shape(shape<2>(layout_AP), shape<2>(layout_BP), shape<2>(layout_CP), shape<2>(layout_DP))), problem_shape_permute(shape_M, shape_N, shape_K, shape_L), shape_A_permute(make_shape(shape_M, shape_K, shape_L)), shape_B_permute(make_shape(shape_N, shape_K, shape_L)), shape_C_permute(make_shape(shape_M, shape_N, shape_L)), shape_D_permute(make_shape(shape_M, shape_N, shape_L)), stride_A_permute(conditional_return<DoPermuteA>(layout_AP.stride(), compute_default_stride(shape_A_permute, stride_A))), stride_B_permute(conditional_return<DoPermuteB>(layout_BP.stride(), compute_default_stride(shape_B_permute, stride_B))), stride_C_permute(conditional_return<DoPermuteC>(layout_CP.stride(), compute_default_stride(shape_C_permute, stride_C))), stride_D_permute(conditional_return<DoPermuteD>(layout_DP.stride(), compute_default_stride(shape_D_permute, stride_D))), tensor_a(problem_size.m() * problem_size.k() * problem_size.batch()), tensor_b(problem_size.k() * problem_size.n() * problem_size.batch()), tensor_c(problem_size.m() * problem_size.n() * problem_size.batch()), tensor_d(problem_size.m() * problem_size.n() * problem_size.batch()), tensor_a_permuted(problem_size.m() * problem_size.k() * problem_size.batch()), tensor_b_permuted(problem_size.k() * problem_size.n() * problem_size.batch()), tensor_c_permuted(problem_size.m() * problem_size.n() * problem_size.batch()), tensor_d_unpermuted(problem_size.m() * problem_size.n() * problem_size.batch()), tensor_d_reference(problem_size.m() * problem_size.n() * problem_size.batch()), gemm_mode(problem_size.batch() > 1 ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm), arguments_permute{ gemm_mode, problem_shape_permute, { tensor_a.get(), stride_A_permute, tensor_b.get(), stride_B_permute, }, { { alpha, beta }, tensor_c.get(), stride_C_permute, tensor_d.get(), stride_D_permute }, hw_info }, workspace_permute(GemmPermute::get_workspace_size(arguments_permute)), arguments_reference{ gemm_mode, problem_shape, { DoPermuteA ? tensor_a_permuted.get() : tensor_a.get(), stride_A, DoPermuteB ? tensor_b_permuted.get() : tensor_b.get(), stride_B }, { { alpha, beta }, DoPermuteC ? tensor_c_permuted.get() : tensor_c.get(), stride_C, DoPermuteD ? 
tensor_d_unpermuted.get() : tensor_d_reference.get(), stride_D }, hw_info }, workspace_reference(GemmReference::get_workspace_size(arguments_reference)) { if (options.verbose) { print("Original GEMM problem:\n"); print(" Problem shape: "); print(problem_shape); print("\n"); print(" Layout A: "); print(make_layout(shape_A, stride_A)); print("\n"); print(" Layout B: "); print(make_layout(shape_B, stride_B)); print("\n"); print(" Layout C: "); print(make_layout(shape_C, stride_C)); print("\n"); print(" Layout D: "); print(make_layout(shape_D, stride_D)); print("\n"); print(" Tile shape: "); print(TileShape{}); print("\n"); print("With fused permutations:\n"); print(" Problem shape: "); print(problem_shape_permute); print("\n"); print(" Layout A: "); print(make_layout(shape_A_permute, stride_A_permute)); print("\n"); print(" Layout B: "); print(make_layout(shape_B_permute, stride_B_permute)); print("\n"); print(" Layout C: "); print(make_layout(shape_C_permute, stride_C_permute)); print("\n"); print(" Layout D: "); print(make_layout(shape_D_permute, stride_D_permute)); print("\n"); print(" Tile shape: "); print(TileShapePermute{}); print("\n"); } cutlass::reference::device::BlockFillRandomUniform(tensor_a.get(), tensor_a.size(), 1, ElementA(7), ElementA(-8), 0); cutlass::reference::device::BlockFillRandomUniform(tensor_b.get(), tensor_b.size(), 2, ElementB(7), ElementB(-8), 0); cutlass::reference::device::BlockFillRandomUniform(tensor_c.get(), tensor_c.size(), 3, ElementC(7), ElementC(-8), 0); cutlass::reference::device::BlockFillSequential(tensor_d.get(), tensor_d.size(), ElementD(0), ElementD(0)); auto const gemm_init = [](auto & gemm, auto const & arguments, auto & workspace) { cutlass::Status status = gemm.can_implement(arguments); if (status != cutlass::Status::kSuccess) { std::cerr << "Requested GEMM kernel cannot be used for this problem.\n" << "Check problem sizes and alignment requirements." 
<< std::endl; exit(EXIT_FAILURE); } status = gemm.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); }; gemm_init(gemm_permute, arguments_permute, workspace_permute ); gemm_init(gemm_reference, arguments_reference, workspace_reference); } void debug_output(std::ostream & os) { auto print_tensor = [](std::ostream &os, char const * name, auto const & data, auto shape, auto stride) { std::vector<remove_cvref_t<decltype(*data.get())>> h_data(data.size()); data.copy_to_host(h_data.data()); Tensor t = make_tensor(h_data.data(), shape, stride); os << "\n" << name << ": " << std::setw(4) << t << std::endl; }; auto [M,N,K,L] = problem_shape; print_tensor(os, "A", tensor_a, make_shape(M,K,L), stride_A); print_tensor(os, "B", tensor_b, make_shape(N,K,L), stride_B); print_tensor(os, "C", tensor_c, make_shape(M,N,L), stride_C); print_tensor(os, "D", tensor_d, make_shape(M,N,L), stride_D); print_tensor(os, "D reference", tensor_d_reference, make_shape(M,N,L), stride_D); } template<bool DoTime, class Gemm> static float run_gemm(Gemm &gemm) { GpuTimer timer; if constexpr (DoTime) timer.start(); cutlass::Status status = gemm.run(); CUTLASS_CHECK(status); if constexpr (DoTime) timer.stop(); if constexpr (DoTime) return timer.elapsed_millis(); else return 0; } template<bool DoTime, class Permute, class Element, class Shape, class Stride> static float run_permute(cutlass::device_memory::allocation<Element> const & input, cutlass::device_memory::allocation<Element> & output, Layout<Shape, Stride> const& layout, cutlass::KernelHardwareInfo const & hw_info) { auto idx = find_if(layout.stride(), [](auto x){ return not is_constant<1, decltype(x)>{}; }); auto stride = get<decltype(idx)::value>(layout.stride()); GpuTimer timer; if constexpr (DoTime) timer.start(); permute<PermuteTraits<Permute>::kBatched, Permute>(input.get(), output.get(), size(take<0,2>(layout)), static_cast<int>(stride), shape<2>(layout), hw_info); if constexpr (DoTime) timer.stop(); if constexpr (DoTime) return timer.elapsed_millis(); else return 0; }; template<bool DoTime, class Gemm2> auto run_reference(Gemm2 &gemm) { float permute_time = 0.f; if constexpr (DoPermuteA) { auto orig_layout = make_original_layout<PermuteAReal, false>(make_layout(shape_A, stride_A)); permute_time += run_permute<DoTime, PermuteA>(tensor_a, tensor_a_permuted, orig_layout, hw_info); } if constexpr (DoPermuteB) { auto orig_layout = make_original_layout<PermuteBReal, true>(make_layout(shape_B, stride_B)); permute_time += run_permute<DoTime, PermuteB>(tensor_b, tensor_b_permuted, select<1,0,2>(orig_layout), hw_info); } if constexpr (DoPermuteC) { auto orig_layout = make_original_layout<PermuteCReal, false>(make_layout(shape_C, stride_C)); permute_time += run_permute<DoTime, PermuteC>(tensor_c, tensor_c_permuted, orig_layout, hw_info); } float gemm_time = run_gemm<DoTime>(gemm); if constexpr (DoPermuteD) { auto orig_layout = make_layout(shape_D, stride_D); permute_time += run_permute<DoTime, PermuteD>(tensor_d_unpermuted, tensor_d_reference, orig_layout, hw_info); } return cute::make_tuple(gemm_time, permute_time); } bool verify() { run_gemm<false>(gemm_permute); run_reference<false>(gemm_reference); return cutlass::reference::device::BlockCompareEqual(tensor_d.get(), tensor_d_reference.get(), tensor_d.size()); } bool run(Options const &options) { if (options.reference_check) { if (!verify()) { std::cout << "Failed validation" << std::endl; #if 1 debug_output(std::cout); #endif return false; } else { std::cout << "Passed validation" << std::endl; } } // // 
Run profiling loop // auto const benchmark = [&](auto name, auto func) { GpuTimer timer; timer.start(); for (int iter = 0; iter < options.iterations; ++iter) { func(); } timer.stop(); double runtime = timer.elapsed_millis() / double(options.iterations); double gflops = 2 * double(problem_size.product()) / 1e6 / runtime; // Two flops per multiply-add std::cout << name << ":\n"; std::cout << " Runtime: " << runtime << " ms\n"; std::cout << " GFLOPs: " << gflops << "\n"; }; benchmark("Fused GEMM+permute", [&](){ run_gemm<false>(gemm_permute); }); benchmark("Unfused GEMM+permute", [&](){ run_reference<false>(gemm_reference); }); benchmark("Standalone GEMM only", [&](){ run_gemm<false>(gemm_reference); }); std::cout << "\n"; return true; } }; #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) } // namespace example int main(int argc, char const **argv) { bool notSupported = false; // CUDA 12 minimum required if (__CUDACC_VER_MAJOR__ < 12) { std::cerr << "This example requires CUDA Toolkit version 12 or later.\n"; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (props.major < 9) { std::cerr << "This example requires a device with compute capability 90 or higher.\n"; notSupported = true; } if (notSupported) { return EXIT_SUCCESS; // Do not fail CI checks on unsupported systems } #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) example::Options options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << "\n"; return EXIT_SUCCESS; } if (!options.valid()) { std::cerr << "Invalid arguments." << "\n"; return EXIT_FAILURE; } cutlass::KernelHardwareInfo hw_info; hw_info.device_id = 0; hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id); using namespace cute; // Define the data types using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using ElementD = cutlass::half_t; using ElementAccumulator = float; using ElementEpilogue = float; // M=64 for TMA epilogue using TileShape = Shape<_128,_128,_64>; // Cluster launch with TMA multicast for better perf using ClusterShape = Shape<_2,_2,_1>; bool result = true; #define COMPILE_ALL_EXAMPLES 0 // REGULAR GEMMS { print("===================================================\n"); print("Tensor A: RowMajor, Tensor4DPermute0213<8,16>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<8, 16>, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #if COMPILE_ALL_EXAMPLES { print("===================================================\n"); print("Tensor A: ColumnMajor, Tensor4DPermute0213<8,16>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<8, 16>, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } { print("===================================================\n"); print("Tensor B: RowMajor, 
Tensor4DPermute0213<8,16>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, ElementB, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<8, 16>, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #endif { print("===================================================\n"); print("Tensor B: ColumnMajor, Tensor4DPermute0213<8,16>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementB, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<8, 16>, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } { print("===================================================\n"); print("Tensor D: RowMajor, Tensor4DPermute0213<8,16>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<8, 16>, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #if COMPILE_ALL_EXAMPLES { print("===================================================\n"); print("Tensor D: ColumnMajor, Tensor4DPermute0213<8,16>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<8, 16>, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #endif { print("===================================================\n"); print("Tensor A: RowMajor, Tensor5DPermute20314<16,8,4>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<16,8,4>, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #if COMPILE_ALL_EXAMPLES { print("===================================================\n"); print("Tensor A: ColumnMajor, Tensor5DPermute02413<16,8,4>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<16,8,4>, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #endif { print("===================================================\n"); print("Tensor 
D: RowMajor, Tensor5DPermute20314<16,8,4>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<16,8,4>, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #if COMPILE_ALL_EXAMPLES { print("===================================================\n"); print("Tensor D: ColumnMajor, Tensor5DPermute02413<16,8,4>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<16,8,4>, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #endif // BATCHED GEMMS { print("===================================================\n"); print("Tensor A: RowMajor, Tensor4DPermuteBMM0213<8>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<8>, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } { print("===================================================\n"); print("Tensor D: RowMajor, Tensor4DPermuteBMM0213<8>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<8>, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #if COMPILE_ALL_EXAMPLES { print("===================================================\n"); print("Tensor A: ColumnMajor, Tensor4DPermuteBMM0321<8>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<8>, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } { print("===================================================\n"); print("Tensor D: RowMajor, Tensor4DPermuteBMM0321<8>\n"); using Runner = example::ExampleRunner<ElementA, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementB, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementC, cutlass::layout::RowMajor, cutlass::layout::NoPermute, ElementD, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<8>, ElementAccumulator, ElementEpilogue, TileShape, ClusterShape>; Runner runner(options, hw_info); result &= runner.run(options); } #endif return result ? 
EXIT_SUCCESS : EXIT_FAILURE; #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) }
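// ----------------------------------------------------------------------------------------------
// Illustrative sketch (added for exposition; it is not used by, and is not part of, the example
// above). It shows how the output permutation from the PyTorch snippet in the file header,
//
//     D = torch.mm(A, B).view(M/D1, D1, D2, N/D2).permute(0, 2, 1, 3)
//
// can be described by a single rank-2 CuTe layout with nested modes, so that an epilogue writing
// element ((m1, d1), (d2, n1)) through this layout lands directly on the permuted location and no
// separate reordering pass is needed. The sizes, names, and mode ordering below are assumptions
// chosen only for illustration; the example above derives its real layouts via
// make_permute_layout() from permute_traits.hpp. The sketch relies on the CuTe headers already
// pulled in by the includes at the top of this file.
namespace permute_layout_sketch {

inline void print_permuted_output_layout() {
  using namespace cute;

  // Hypothetical factorization of the GEMM output: M = M1 * D1, N = D2 * N1.
  auto M1 = Int<8>{};
  auto D1 = Int<2>{};
  auto D2 = Int<4>{};
  auto N1 = Int<16>{};

  // The permuted output tensor has shape (M1, D2, D1, N1) stored row-major, i.e. strides
  // (D2*D1*N1, D1*N1, N1, 1). Grouping its modes as ((M1, D1), (D2, N1)) yields a rank-2
  // "matrix" layout that maps a GEMM coordinate to the permuted memory offset:
  //   offset = m1*D2*D1*N1 + d1*N1 + d2*D1*N1 + n1
  auto layout_D_permuted = make_layout(
      make_shape (make_shape (M1,           D1), make_shape (D2,      N1)),
      make_stride(make_stride(D2 * D1 * N1, N1), make_stride(D1 * N1, Int<1>{})));

  // For the sizes above this prints a layout with shape ((_8,_2),(_4,_16))
  // and stride ((_128,_16),(_32,_1)).
  print(layout_D_permuted); print("\n");
}

} // namespace permute_layout_sketch
// ----------------------------------------------------------------------------------------------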
cutlass/examples/53_hopper_gemm_permute/53_hopper_gemm_permute.cu/0
{ "file_path": "cutlass/examples/53_hopper_gemm_permute/53_hopper_gemm_permute.cu", "repo_id": "cutlass", "token_count": 18645 }
14
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Example of running an Ada FP8 GEMM. In addition to using FP8 Tensor Core instructions, the Ada FP8 GEMM uses a distinct epilogue that enables additional scaling of operands/outputs, storing a pre-activation-function output tensor (called the "auxiliary" output), and computing the absolute maximum value of the outputs. 
Pseudocode for this epilogue is as follows: Aux = ((alpha * scale_a * scale_b) * accumulator) + ((beta * scale_c) * source) + bias D = activation(Aux) if Aux is fp8 type: abs_max_output = max( abs(aux) | (for every aux in Aux)) Aux = scale_aux * Aux endif if D is fp8 type: abs_max_output = max( abs(d) | (for every d in D)) D = scale_d * D endif Parameter Aux is optionally stored to global memory */ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/numeric_conversion.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm_complex.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/linear_combination_generic_with_scaling.h" #include "cutlass/gemm/device/gemm_universal_with_absmax.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_coord.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" using ElementA = cutlass::float_e4m3_t; using ElementB = cutlass::float_e4m3_t; using ElementOutput = cutlass::float_e4m3_t; using ElementAuxOutput = ElementOutput; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; static int const kStages = 3; static int const kAlignmentA = 16; static int const kAlignmentB = 16; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax< cutlass::epilogue::thread::ReLu, ElementOutput, ElementAuxOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >; template <typename MathOperator> using Gemm_ = cutlass::gemm::device::GemmUniversalWithAbsMax< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89, cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>, EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages, kAlignmentA, kAlignmentB, MathOperator >; using ElementAbsmax = typename EpilogueOutputOp::ElementAbsmax; // Command line options parsing struct Options { bool help; bool error; bool reference_check; cutlass::gemm::GemmCoord problem_size; int iterations; int warmup_iterations; bool scale_A; bool scale_B; bool scale_C; float alpha; float beta; Options(): help(false), error(false), reference_check(false), iterations(20), warmup_iterations(5), scale_A(true), scale_B(true), scale_C(true), alpha(1.f), beta(0.f) { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("iterations", iterations, 20); cmd.get_cmd_line_argument("warmup_iterations", warmup_iterations, 5); cmd.get_cmd_line_argument("reference-check", reference_check, false); cmd.get_cmd_line_argument("scale-A", scale_A, true); cmd.get_cmd_line_argument("scale-B", scale_B, true); cmd.get_cmd_line_argument("scale-C", scale_C, true); cmd.get_cmd_line_argument("alpha", alpha, 1.f); cmd.get_cmd_line_argument("beta", 
beta, 0.f); int m, n, k; cmd.get_cmd_line_argument("m", m, 1024); cmd.get_cmd_line_argument("n", n, 1024); cmd.get_cmd_line_argument("k", k, 1024); problem_size = cutlass::gemm::GemmCoord{m, n, k}; } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "58_ada_fp8_gemm\n\n" << " This example executes a GEMM using Ada FP8 Tensor Core operations. In addition to performing\n" << " a normal GEMM, the kernel performs the following operations:\n" << " Aux = ((alpha * scale_a * scale_b) * accumulator) + ((beta * scale_c) * source) + bias\n" << " D = activation(Aux)\n\n" << " if Aux is fp8:\n" << " abs_max_output = max( abs(aux) | (for every aux in Aux) )\n" << " Aux = scale_aux * Aux\n\n" << " if D is fp8 type:\n" << " abs_max_output = max( abs(d) | (for every d in D) )\n" << " D = scale_d * D\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement\n\n" << " --m=<int> Sets the M dimension of the GEMM\n" << " --n=<int> Sets the N dimension of the GEMM\n" << " --k=<int> Sets the K dimension of the GEMM\n" << " --scale-A=<bool> Whether to apply a scaling factor to operand A (default: true)\n" << " --scale-B=<bool> Whether to apply a scaling factor to operand B (default: true)\n" << " --scale-C=<bool> Whether to apply a scaling factor to operand C (default: true)\n" << " --iterations=<int> Number of profiling iterations to perform\n" << " --warmup-iterations=<int> Number of warmup iterations to perform\n" << " --reference-check=<bool> If true, performs reference check\n"; return out; } /// Compute performance in GFLOP/s float gflops(float runtime_s) const { // Two flops per multiply-add return 2.0f * float(problem_size.product()) / float(1.0e9) / runtime_s; } }; /// Helper class to run the kernel template <typename Gemm> struct TestbedRunner { using ElementAccumulator = typename Gemm::ElementAccumulator; using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute; using ElementScalingFactor = typename Gemm::EpilogueOutputOp::ElementScalingFactor; static bool const kScaleAux = Gemm::EpilogueOutputOp::kIsScalingAndAmaxAuxOutputNeeded; static bool const kScaleOutput = Gemm::EpilogueOutputOp::kIsScalingAndAmaxOutputNeeded; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<typename Gemm::ElementA, typename Gemm::LayoutA> tensor_A; cutlass::HostTensor<typename Gemm::ElementB, typename Gemm::LayoutB> tensor_B; cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_C; cutlass::HostTensor<typename Gemm::EpilogueOutputOp::ElementAuxOutput, typename Gemm::LayoutC> tensor_Aux; cutlass::HostTensor<typename Gemm::EpilogueOutputOp::ElementOutput, typename Gemm::LayoutC> tensor_D; cutlass::HostTensor<typename Gemm::ElementC, typename Gemm::LayoutC> tensor_Vector; cutlass::HostTensor<ElementAccumulator, typename Gemm::LayoutC> tmp_D; cutlass::HostTensor<typename Gemm::EpilogueOutputOp::ElementOutput, typename Gemm::LayoutC> reference_D; cutlass::HostTensor<typename Gemm::EpilogueOutputOp::ElementAuxOutput, typename Gemm::LayoutC> reference_Aux; cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_A; cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_B; cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_C; cutlass::HostTensor<ElementScalingFactor, typename Gemm::LayoutC> scale_D; cutlass::HostTensor<ElementScalingFactor, typename 
Gemm::LayoutC> scale_Aux; cutlass::HostTensor<ElementAbsmax, typename Gemm::LayoutC> abs_max_Aux; cutlass::HostTensor<ElementAbsmax, typename Gemm::LayoutC> abs_max_D; cutlass::HostTensor<ElementAbsmax, typename Gemm::LayoutC> reference_abs_max_Aux; cutlass::HostTensor<ElementAbsmax, typename Gemm::LayoutC> reference_abs_max_D; // // Methods // TestbedRunner( bool scaleA = true, bool scaleB = true, bool scaleC = true, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize scaling factors template <typename Element, typename Layout> bool initialize_scale_factor(cutlass::TensorView<Element, Layout> view, uint64_t seed, int bits=0) { cutlass::reference::host::TensorFillRandomUniform(view, seed, double(1.), double(0.), bits); return true; } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { std::cerr << "Not implemented"; return false; } return true; } /// Initializes data structures void initialize(const Options& options) { // // Allocate the GEMM workspace // tensor_A.resize(options.problem_size.mk()); tensor_B.resize(options.problem_size.kn()); tensor_C.resize(options.problem_size.mn()); tensor_D.resize(options.problem_size.mn()); tensor_Vector.resize({1, options.problem_size.n()}); reference_D.resize(options.problem_size.mn(), false); tmp_D.resize(options.problem_size.mn(), false); initialize_tensor(tensor_A.host_view(), init_A, seed + 2019); initialize_tensor(tensor_B.host_view(), init_B, seed + 2018); initialize_tensor(tensor_C.host_view(), init_C, seed + 2017); initialize_tensor(tensor_Vector.host_view(), init_C, seed + 2020); // It is possible to randomly initialize to all zeros, so override this with non-zeros // in the upper left corner of each operand. 
cutlass::Coord<2> origin(0); tensor_A.host_view().at(origin) = typename Gemm::ElementA(1); tensor_B.host_view().at(origin) = typename Gemm::ElementB(1); tensor_C.host_view().at(origin) = typename Gemm::ElementC(1); tensor_Vector.host_view().at(origin) = typename Gemm::ElementC(1); cutlass::reference::host::TensorFill(tensor_D.host_view()); cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); tensor_A.sync_device(); tensor_B.sync_device(); tensor_C.sync_device(); tensor_D.sync_device(); tensor_Vector.sync_device(); int scale_bits = 2; if (options.scale_A) { scale_A.resize({1, 1}); initialize_scale_factor(scale_A.host_view(), seed + 2021, scale_bits); scale_A.sync_device(); } if (options.scale_B) { scale_B.resize({1, 1}); initialize_scale_factor(scale_B.host_view(), seed + 2022, scale_bits); scale_B.sync_device(); } if (options.scale_C) { scale_C.resize({1, 1}); initialize_scale_factor(scale_C.host_view(), seed + 2023, scale_bits); scale_C.sync_device(); } if (kScaleOutput) { scale_D.resize({1, 1}); initialize_scale_factor(scale_D.host_view(), seed + 2024, scale_bits); scale_D.sync_device(); abs_max_D.resize({1, 1}); cutlass::reference::host::TensorFill(abs_max_D.host_view()); abs_max_D.sync_device(); reference_abs_max_D.resize({1, 1}); } if (kScaleAux) { tensor_Aux.resize(options.problem_size.mn()); cutlass::reference::host::TensorFill(tensor_Aux.host_view()); tensor_Aux.sync_device(); scale_Aux.resize({1, 1}); initialize_scale_factor(scale_Aux.host_view(), seed + 2025, scale_bits); scale_Aux.sync_device(); abs_max_Aux.resize({1, 1}); cutlass::reference::host::TensorFill(abs_max_Aux.host_view()); abs_max_Aux.sync_device(); reference_Aux.resize(options.problem_size.mn(), false); reference_abs_max_Aux.resize({1, 1}); } } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference(const Options& options) { tensor_D.sync_host(); bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view()); if (kScaleAux) { tensor_Aux.sync_host(); abs_max_Aux.sync_host(); passed &= cutlass::reference::host::TensorEquals(reference_Aux.host_view(), tensor_Aux.host_view()); passed &= cutlass::reference::host::TensorEquals(abs_max_Aux.host_view(), reference_abs_max_Aux.host_view()); } if (kScaleOutput) { abs_max_D.sync_host(); passed &= cutlass::reference::host::TensorEquals(abs_max_D.host_view(), reference_abs_max_D.host_view()); } if (!passed) { std::cerr << "Reference check failed" << std::endl; std::string output_file = "testbed_with_amax_errors.txt"; std::ofstream file(output_file); file << "problem: " << options.problem_size << ", alpha: " << options.alpha << ", beta: " << options.beta << "\n\n"; file << "A =\n" << tensor_A.host_view() << "\nB =\n" << tensor_B.host_view() << "\nC =\n" << tensor_C.host_view() << "\nVector =\n" << tensor_Vector.host_view() << "\nScaleA = " << scale_A.host_view() << "\nScaleB = " << scale_B.host_view() << "\nScaleC = " << scale_C.host_view() << "\nScaleD = " << scale_D.host_view() << "\nScaleAux = " << scale_Aux.host_view() << "\n\nReference D =\n" << reference_D.host_view() << "\nComputed D =\n" << tensor_D.host_view(); if (kScaleAux) { file << "\n\nReference Aux =\n" << reference_Aux.host_view() << "\nComputed Aux =\n" << tensor_Aux.host_view() << "\n\nReference Absmax Aux = " << reference_abs_max_Aux.host_view() << "\nComputed Absmax Aux = " << abs_max_Aux.host_view(); } if (kScaleOutput) { file << "\n\nReference Absmax D = " << 
reference_abs_max_D.host_view() << "\nComputed Absmax D = " << abs_max_D.host_view(); } std::cerr << "Dumped results to " << output_file << std::endl; } return passed; } /// Verifies the result is a GEMM bool verify(const Options& options) { cutlass::Coord<2> origin(0); ElementCompute scaled_alpha = options.alpha; if (options.scale_A) { scaled_alpha *= scale_A.host_view().at(origin); } if (options.scale_B) { scaled_alpha *= scale_B.host_view().at(origin); } ElementCompute scaled_beta = options.beta; if (options.scale_C) { scaled_beta *= scale_C.host_view().at(origin); } // // Verify // cutlass::reference::host::GemmComplex< typename Gemm::ElementA, typename Gemm::LayoutA, typename Gemm::ElementB, typename Gemm::LayoutB, typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute, ElementAccumulator, ElementAccumulator >( options.problem_size, scaled_alpha, tensor_A.host_ref(), Gemm::kTransformA, tensor_B.host_ref(), Gemm::kTransformB, scaled_beta, tensor_C.host_ref(), tmp_D.host_ref(), ElementAccumulator(0) ); ElementCompute tmp_abs_max_Aux(0.); ElementCompute tmp_abs_max_D(0.); cutlass::NumericConverter<ElementCompute, typename Gemm::ElementC> cvt_c_to_compute; cutlass::NumericConverter<ElementCompute, ElementAccumulator> cvt_accum_to_compute; cutlass::NumericConverter<ElementAccumulator, ElementCompute> cvt_compute_to_accum; cutlass::NumericConverter<typename Gemm::EpilogueOutputOp::ElementOutput, ElementCompute> cvt_compute_to_d; cutlass::NumericConverter<typename Gemm::EpilogueOutputOp::ElementAuxOutput, ElementCompute> cvt_compute_to_aux; cutlass::absolute_value_op<ElementCompute> abs; cutlass::maximum_with_nan_propogation<ElementCompute> max; cutlass::epilogue::thread::ReLu<ElementCompute> act; ElementScalingFactor d_scale = kScaleOutput ? scale_D.host_view().at(origin) : ElementScalingFactor(1.); for (int m = 0; m < options.problem_size.m(); ++m) { for (int n = 0; n < options.problem_size.n(); ++n) { ElementCompute intermediate = cvt_accum_to_compute(tmp_D.host_view().at({m, n})); ElementCompute bias = cvt_c_to_compute(tensor_Vector.host_view().at({0, n})); ElementCompute aux = intermediate + bias; ElementCompute d = act(aux); tmp_abs_max_Aux = max(abs(aux), tmp_abs_max_Aux); tmp_abs_max_D = max(abs(d), tmp_abs_max_D); reference_D.host_view().at({m, n}) = cvt_compute_to_d(d * d_scale); if (kScaleAux) { reference_Aux.host_view().at({m, n}) = cvt_compute_to_aux(aux * scale_Aux.host_view().at(origin)); } } } if (kScaleAux) { reference_abs_max_Aux.host_view().at(origin) = cvt_compute_to_accum(tmp_abs_max_Aux); } if (kScaleOutput) { reference_abs_max_D.host_view().at(origin) = cvt_compute_to_accum(tmp_abs_max_D); } return compare_reference(options); } /// Returns true if the CUDA device is sufficient to execute the kernel. bool sufficient() const { if (__CUDACC_VER_MAJOR__ < 12 || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ < 4)) { std::cerr << "This example requires CUDA 12.4 or greater." 
<< std::endl; return false; } size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { std::cerr << "cudaGetDevice() failed with error: " << cudaGetErrorString(result) << std::endl; return false; } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() failed with error: " << cudaGetErrorString(result) << std::endl; return false; } if (properties.major < 8 || (properties.major == 8 && properties.minor < 9)) { std::cerr << "CUTLASS's Ada FP8 GEMM example requires a device of compute capability 89 or higher.\n" << std::endl; return false; } if (properties.sharedMemPerBlockOptin < smem_size) { std::cerr << "Insufficient shared memory. Need " << smem_size << ", but device only has " << properties.sharedMemPerBlockOptin << std::endl; return false; } return true; } /// Executes one test bool run(Options& options) { // Waive test if insufficient CUDA device if (!sufficient()) { std::cerr << "Insufficient resources to run the kernel." << std::endl; return false; } this->initialize(options); // // Initialize the GEMM operator // typename Gemm::EpilogueOutputOp::Params::ActivationParams activation_params{ ElementCompute(options.alpha), ElementCompute(options.beta) }; typename Gemm::EpilogueOutputOp::Params epilogue_params{ activation_params, scale_A.device_data(), scale_B.device_data(), scale_C.device_data(), scale_D.device_data(), scale_Aux.device_data(), abs_max_Aux.device_data(), abs_max_D.device_data() }; typename Gemm::Arguments arguments{ cutlass::gemm::GemmUniversalMode::kGemm, options.problem_size, /* batch_count = */ 1, epilogue_params, tensor_A.device_data(), tensor_B.device_data(), tensor_C.device_data(), tensor_D.device_data(), tensor_Aux.device_data(), tensor_Vector.device_data(), options.problem_size.m() * options.problem_size.k(), options.problem_size.n() * options.problem_size.k(), options.problem_size.m() * options.problem_size.n(), options.problem_size.m() * options.problem_size.n(), (int)options.problem_size.m(), // Batch stride vector tensor_A.layout().stride(0), tensor_B.layout().stride(0), tensor_C.layout().stride(0), tensor_D.layout().stride(0), (int64_t)0 // Leading dimension of vector. 
This must be 0 }; Gemm gemm_op; cutlass::Status status = gemm_op.can_implement(arguments); if (status != cutlass::Status::kSuccess) { std::cerr << "Gemm::can_implement() failed" << std::endl; return false; } size_t workspace_size = Gemm::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); status = gemm_op.initialize(arguments, workspace.get()); if (status != cutlass::Status::kSuccess) { std::cerr << "Gemm::initialize() failed" << std::endl; return false; } // // Run the GEMM // status = gemm_op(); if (status != cutlass::Status::kSuccess) { std::cerr << "Gemm::run() failed" << std::endl; return false; } cudaError_t cuda_error = cudaDeviceSynchronize(); if (cuda_error != cudaSuccess) { std::cerr << "CUDA error: " << cudaGetErrorString(cuda_error) << std::endl; return false; } // // Verify // bool passed = true; if (options.reference_check) { passed &= this->verify(options); } else { std::cout << "Skipped reference check" << std::endl; } // // Warm up // for (int i = 0; i < options.warmup_iterations; ++i) { gemm_op(); } // // Profile // cudaEvent_t events[2]; cudaError_t error; for (auto & event : events) { error = cudaEventCreate(&event); if (error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(error) << std::endl; return false; } } // Record an event at the start of a series of GEMM operations error = cudaEventRecord(events[0]); if (error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(error) << std::endl; return false; } // Run profiling loop for (int iter = 0; iter < options.iterations; ++iter) { gemm_op(); } // Record an event when the GEMM operations have been launched. error = cudaEventRecord(events[1]); if (error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(error) << std::endl; return false; } // Wait for work on the device to complete. error = cudaEventSynchronize(events[1]); if (error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(error) << std::endl; return false; } // Measure elapsed runtime float runtime_ms = 0; error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(error) << std::endl; return false; } // Compute average runtime and GFLOPs. runtime_ms = runtime_ms / float(options.iterations); float gflops = options.gflops(runtime_ms / 1000.0f); std::cout << "Problem size: " << options.problem_size.m() << 'x' << options.problem_size.n() << 'x' << options.problem_size.k() << std::endl; std::cout << "Runtime (ms): " << runtime_ms << std::endl; std::cout << "GFLOPs/sec: " << gflops << std::endl; // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const** argv) { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (__CUDACC_VER_MAJOR__ < 12 || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ < 4) || (props.major != 8 && props.minor != 9)) { // // This example requires an NVIDIA Ada-architecture GPU. 
// std::cout << "CUTLASS's FP8 SM89 example requires a GPU of NVIDIA's Ada architecture " << "and CUDA toolkit version 12.4 or later.\n"; return 0; } // // Parse options // Options options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.error) { std::cerr << "Aborting execution." << std::endl; return -1; } std::cout << "Running GEMM with staged accumulation (OpMultiplyAdd)" << std::endl; std::cout << "=====================================================" << std::endl; TestbedRunner<Gemm_<cutlass::arch::OpMultiplyAdd>> testbed_staged_accum; bool passed = testbed_staged_accum.run(options); if (passed) { std::cout << "Passed" << std::endl; } else { std::cout << "Failed" << std::endl; } std::cout << "\nRunning GEMM with fast accumulation (OpMultiplyAddFastAccum)" << std::endl; std::cout << "============================================================" << std::endl; TestbedRunner<Gemm_<cutlass::arch::OpMultiplyAddFastAccum>> testbed_fast_accum; passed = testbed_fast_accum.run(options); if (passed) { std::cout << "Passed" << std::endl; } else { std::cout << "Failed" << std::endl; } return 0; }
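/////////////////////////////////////////////////////////////////////////////////////////////////

// The profiling loop in run() above follows the standard CUDA-event timing pattern
// (cudaEventRecord / cudaEventSynchronize / cudaEventElapsedTime). The helper below is a
// minimal, self-contained sketch of that same pattern; the name `time_kernel_ms` and its
// callable parameter are illustrative only, are not used elsewhere in this example, and rely
// only on the CUDA runtime API already used throughout this file.
template <typename Launch>
float time_kernel_ms(Launch&& launch, int iterations) {
  cudaEvent_t start, stop;
  if (cudaEventCreate(&start) != cudaSuccess) { return -1.0f; }
  if (cudaEventCreate(&stop)  != cudaSuccess) { return -1.0f; }

  // Record an event, launch the workload `iterations` times, record a second event,
  // then wait on the second event and read the elapsed time between the two.
  cudaEventRecord(start);
  for (int i = 0; i < iterations; ++i) {
    launch();
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);

  float elapsed_ms = 0.0f;
  cudaEventElapsedTime(&elapsed_ms, start, stop);

  (void)cudaEventDestroy(start);
  (void)cudaEventDestroy(stop);

  // Average per-iteration runtime in milliseconds
  return (iterations > 0) ? elapsed_ms / float(iterations) : elapsed_ms;
}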
cutlass/examples/58_ada_fp8_gemm/ada_fp8_gemm.cu/0
{ "file_path": "cutlass/examples/58_ada_fp8_gemm/ada_fp8_gemm.cu", "repo_id": "cutlass", "token_count": 11448 }
15
/***************************************************************************************************
 * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>

#include <cute/tensor.hpp>

#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "cutlass/util/helper_cuda.hpp"

// This is a simple tutorial showing several ways to partition a tensor into tiles and then
// perform efficient, coalesced copies. This example also shows how to vectorize accesses,
// which may be a useful optimization or required for certain workloads.
//
// `copy_kernel()` and `copy_kernel_vectorized()` each assume that a pair of tensors with
// dimensions (m, n) has been partitioned via `tiled_divide()`.
//
// The result is a pair of compatible tensors with dimensions ((M, N), m', n'), where
// (M, N) denotes a statically sized tile, and m' and n' denote the number of such tiles
// within the tensor.
//
// Each statically sized tile is mapped to a CUDA threadblock, which performs efficient
// loads and stores to Global Memory.
//
// `copy_kernel()` uses `cute::local_partition()` to partition the tensor and map
// the result to threads using a striped indexing scheme. Threads themselves are arranged
// in a (ThreadShape_M, ThreadShape_N) arrangement which is replicated over the tile.
//
// `copy_kernel_vectorized()` uses `cute::make_tiled_copy()` to perform a similar
// partitioning using `cute::Copy_Atom` to perform vectorization. The actual vector
// size is defined by `VecLayout`.
//
// This example assumes the overall tensor shape is divisible by the tile size and
// does not perform predication.

/// Simple copy kernel.
//
// Uses local_partition() to partition a tile among threads arranged as (THR_M, THR_N).
template <class TensorS, class TensorD, class ThreadLayout>
__global__ void copy_kernel(TensorS S, TensorD D, ThreadLayout)
{
  using namespace cute;

  // Slice the tiled tensors
  Tensor tile_S = S(make_coord(_,_), blockIdx.x, blockIdx.y);            // (BlockShape_M, BlockShape_N)
  Tensor tile_D = D(make_coord(_,_), blockIdx.x, blockIdx.y);            // (BlockShape_M, BlockShape_N)

  // Construct a partitioning of the tile among threads with the given thread arrangement.

  // Concept:                         Tensor  ThrLayout       ThrIndex
  Tensor thr_tile_S = local_partition(tile_S, ThreadLayout{}, threadIdx.x);  // (ThrValM, ThrValN)
  Tensor thr_tile_D = local_partition(tile_D, ThreadLayout{}, threadIdx.x);  // (ThrValM, ThrValN)

  // Construct a register-backed Tensor with the same shape as each thread's partition
  // Use make_tensor_like to try to match the layout of thr_tile_S
  Tensor fragment = make_tensor_like(thr_tile_S);                          // (ThrValM, ThrValN)

  // Copy from GMEM to RMEM and from RMEM to GMEM
  copy(thr_tile_S, fragment);
  copy(fragment, thr_tile_D);
}

/// Vectorized copy kernel.
///
/// Uses `make_tiled_copy()` to perform a copy using vector instructions. This operation
/// has the precondition that pointers are aligned to the vector size.
///
template <class TensorS, class TensorD, class ThreadLayout, class VecLayout>
__global__ void copy_kernel_vectorized(TensorS S, TensorD D, ThreadLayout, VecLayout)
{
  using namespace cute;

  using Element = typename TensorS::value_type;

  // Slice the tensors to obtain a view into each tile.
  Tensor tile_S = S(make_coord(_, _), blockIdx.x, blockIdx.y);           // (BlockShape_M, BlockShape_N)
  Tensor tile_D = D(make_coord(_, _), blockIdx.x, blockIdx.y);           // (BlockShape_M, BlockShape_N)

  // Define `AccessType` which controls the size of the actual memory access.
  using AccessType = cutlass::AlignedArray<Element, size(VecLayout{})>;

  // A copy atom corresponds to one hardware memory access.
  using Atom = Copy_Atom<UniversalCopy<AccessType>, Element>;

  // Construct tiled copy, a tiling of copy atoms.
  //
  // Note that this assumes the vector and thread layouts are aligned with contiguous data
  // in GMEM. Alternative thread layouts are possible but may result in uncoalesced
  // reads. Alternative vector layouts are also possible, though incompatible layouts
  // will result in compile time errors.
  auto tiled_copy =
    make_tiled_copy(
      Atom{},                       // access size
      ThreadLayout{},               // thread layout
      VecLayout{});                 // vector layout (e.g. 4x1)

  // Construct a Tensor corresponding to each thread's slice.
auto thr_copy = tiled_copy.get_thread_slice(threadIdx.x);

  Tensor thr_tile_S = thr_copy.partition_S(tile_S);             // (CopyOp, CopyM, CopyN)
  Tensor thr_tile_D = thr_copy.partition_D(tile_D);             // (CopyOp, CopyM, CopyN)

  // Construct a register-backed Tensor with the same shape as each thread's partition
  // Use make_fragment_like because the first mode is the instruction-local mode
  Tensor fragment = make_fragment_like(thr_tile_D);             // (CopyOp, CopyM, CopyN)

  // Copy from GMEM to RMEM and from RMEM to GMEM
  copy(tiled_copy, thr_tile_S, fragment);
  copy(tiled_copy, fragment, thr_tile_D);
}

/// Main function
int main(int argc, char** argv)
{
  //
  // Given a 2D shape, perform an efficient copy
  //

  using namespace cute;
  using Element = float;

  // Define a tensor shape with dynamic extents (m, n)
  auto tensor_shape = make_shape(256, 512);

  //
  // Allocate and initialize
  //

  thrust::host_vector<Element> h_S(size(tensor_shape));
  thrust::host_vector<Element> h_D(size(tensor_shape));

  for (size_t i = 0; i < h_S.size(); ++i) {
    h_S[i] = static_cast<Element>(i);
    h_D[i] = Element{};
  }

  thrust::device_vector<Element> d_S = h_S;
  thrust::device_vector<Element> d_D = h_D;

  //
  // Make tensors
  //

  Tensor tensor_S = make_tensor(make_gmem_ptr(thrust::raw_pointer_cast(d_S.data())), make_layout(tensor_shape));
  Tensor tensor_D = make_tensor(make_gmem_ptr(thrust::raw_pointer_cast(d_D.data())), make_layout(tensor_shape));

  //
  // Tile tensors
  //

  // Define a statically sized block (M, N).
  // Note, by convention, capital letters are used to represent static modes.
  auto block_shape = make_shape(Int<128>{}, Int<64>{});

  if ((size<0>(tensor_shape) % size<0>(block_shape)) || (size<1>(tensor_shape) % size<1>(block_shape))) {
    std::cerr << "The tensor shape must be divisible by the block shape." << std::endl;
    return -1;
  }
  // Equivalent check to the above
  if (not weakly_compatible(block_shape, tensor_shape)) {
    std::cerr << "Expected the tensors to be weakly compatible with the block_shape." << std::endl;
    return -1;
  }

  // Tile the tensor (m, n) ==> ((M, N), m', n') where (M, N) is the static tile
  // shape, and modes (m', n') correspond to the number of tiles.
  //
  // These will be used to determine the CUDA kernel grid dimensions.
  Tensor tiled_tensor_S = tiled_divide(tensor_S, block_shape);      // ((M, N), m', n')
  Tensor tiled_tensor_D = tiled_divide(tensor_D, block_shape);      // ((M, N), m', n')

  // Thread arrangement
  Layout thr_layout = make_layout(make_shape(Int<32>{}, Int<8>{}));

  // Vector dimensions
  Layout vec_layout = make_layout(make_shape(Int<4>{}, Int<1>{}));

  //
  // Determine grid and block dimensions
  //

  dim3 gridDim (size<1>(tiled_tensor_D), size<2>(tiled_tensor_D));   // Grid shape corresponds to modes m' and n'
  dim3 blockDim(size(thr_layout));

  //
  // Launch the kernel
  //

  copy_kernel_vectorized<<< gridDim, blockDim >>>(
    tiled_tensor_S,
    tiled_tensor_D,
    thr_layout,
    vec_layout);

  cudaError_t result = cudaDeviceSynchronize();
  if (result != cudaSuccess) {
    std::cerr << "CUDA Runtime error: " << cudaGetErrorString(result) << std::endl;
    return -1;
  }

  //
  // Verify
  //

  h_D = d_D;

  int32_t errors = 0;
  int32_t const kErrorLimit = 10;

  for (size_t i = 0; i < h_D.size(); ++i) {
    if (h_S[i] != h_D[i]) {
      std::cerr << "Error. S[" << i << "]: " << h_S[i] << ", D[" << i << "]: " << h_D[i] << std::endl;

      if (++errors >= kErrorLimit) {
        std::cerr << "Aborting after " << kErrorLimit << " errors." << std::endl;
        return -1;
      }
    }
  }

  std::cout << "Success." << std::endl;

  return 0;
}
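/////////////////////////////////////////////////////////////////////////////////////////////////

// main() above launches only copy_kernel_vectorized(). The simpler copy_kernel() defined
// earlier accepts the same tiled tensors and thread layout; a minimal sketch of that launch,
// assuming it were placed inside main() after thr_layout is constructed, is:
//
//   // One threadblock per (m', n') tile, one thread per entry of thr_layout.
//   copy_kernel<<< gridDim, blockDim >>>(tiled_tensor_S, tiled_tensor_D, thr_layout);
//
// With the shapes used above, each thread of copy_kernel() moves (128*64) / (32*8) = 32
// elements per tile, one element per copy, whereas copy_kernel_vectorized() moves 4 floats
// (a single 16-byte access) per copy instruction as dictated by vec_layout.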
cutlass/examples/cute/tutorial/tiled_copy.cu/0
{ "file_path": "cutlass/examples/cute/tutorial/tiled_copy.cu", "repo_id": "cutlass", "token_count": 3338 }
16
/*************************************************************************************************** * Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/tensor.hpp> #include <cute/atom/copy_atom.hpp> namespace cute { // // Prefetch global tensors into L2 // template <uint32_t NumThreads, uint32_t FetchBytes = 64, class GEngine, class GLayout> CUTE_HOST_DEVICE void cooperative_prefetch(uint32_t const& tid, Tensor<GEngine, GLayout> const& src) { static_assert(is_gmem<GEngine>::value, "Expected global tensor for prefetch"); constexpr int V = decltype(max_common_vector(src, src))::value; if constexpr (V > 1) { // L2 sector is 32B, default fetch granularity is 64B using VecType = conditional_t<(V * sizeof_bits_v<typename GEngine::value_type>) < (FetchBytes * 8), ArrayEngine<typename GEngine::value_type, V>, uint8_t[FetchBytes] >; Tensor src_v = recast<VecType const>(src); CUTE_UNROLL for (int i = tid; i < size(src_v); i += NumThreads) { prefetch(raw_pointer_cast(&src_v(i))); } } else { CUTE_UNROLL for (int i = tid; i < size(src); i += NumThreads) { prefetch(raw_pointer_cast(&src(i))); } } } template <class GEngine, class GLayout> CUTE_HOST_DEVICE void prefetch(Tensor<GEngine, GLayout> const& src) { return cooperative_prefetch<1>(0, src); } // Prefetch with copy atom namespace detail { template <class CopyOp, class = void> constexpr bool has_prefetch = false; template <class CopyOp> constexpr bool has_prefetch<CopyOp, void_t<typename CopyOp::PREFETCH>> = true; template <class CopyOp, class = void> constexpr bool is_prefetch = false; template <class CopyOp> constexpr bool is_prefetch<CopyOp, void_t<typename CopyOp::PREFETCH>> = is_same_v<CopyOp, typename CopyOp::PREFETCH>; } // end namespace detail template <class CopyOp, class... CT_Args, class... 
CA_Args, class GEngine, class GLayout> CUTE_HOST_DEVICE void prefetch(Copy_Atom<Copy_Traits<CopyOp, CT_Args...>, CA_Args...> const& atom, Tensor<GEngine, GLayout> const& src) { if constexpr (detail::has_prefetch<CopyOp>) { using Prefetch_Traits = Copy_Traits<typename CopyOp::PREFETCH, CT_Args...>; using Prefetch_Atom = Copy_Atom<Prefetch_Traits, CA_Args...>; Prefetch_Atom prefetch_atom{atom}; auto& dst = const_cast<Tensor<GEngine, GLayout>&>(src); // dst is ignored for prefetch atoms return copy(prefetch_atom, src, dst); } else { return prefetch(src); } } #if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED) template <class... CT_Args, class SrcEngine, class SrcLayout> CUTE_HOST_DEVICE void prefetch(Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const& atom, Tensor<SrcEngine, SrcLayout> const& src) { using SrcType = typename SrcEngine::value_type; static_assert(is_gmem<SrcEngine>::value, "Expected global tensor for L2 prefetch"); auto tiler = max_common_layout(src, src); constexpr int vec_elem = decltype(size(tiler))::value; constexpr int vec_bits = vec_elem * sizeof_bits_v<SrcType>; static_assert(vec_bits >= 128, "Expected at least 128-bits for BLKCP"); // Construct a new concrete Atom of the vector size auto bulk_atom = Copy_Atom<Copy_Traits<SM90_BULK_COPY_G2S, Int<vec_bits>>, SrcType>{}; return prefetch(bulk_atom, logical_divide(src, tiler)); } // Backwards-compat. Throw out any extra Copy_Atom args. template <class... CT_Args, class... CA_Args, class SrcEngine, class SrcLayout> CUTE_HOST_DEVICE void prefetch(Copy_Atom<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...>, CA_Args...> const& atom, Tensor<SrcEngine, SrcLayout> const& src) { return prefetch(static_cast<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const&>(atom), src); } #endif // #if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED) } // end namespace cute
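/////////////////////////////////////////////////////////////////////////////////////////////////

// A typical call site for cooperative_prefetch(): every thread of a threadblock helps issue
// L2 prefetches for a global-memory tensor before the data is consumed. The kernel below is
// only an illustrative sketch (the kernel name and NumThreads parameter are not part of this
// header); it assumes `gA` was created over a gmem pointer, as required by the static_assert
// above.
//
//   template <int NumThreads, class GEngine, class GLayout>
//   __global__ void consume_with_prefetch(cute::Tensor<GEngine, GLayout> gA) {
//     // Warm the L2 with the whole tile before the main loop reads it.
//     cute::cooperative_prefetch<NumThreads>(threadIdx.x, gA);
//
//     // ... main loop that reads gA ...
//   }
//
// The single-argument overload prefetch(src) is shorthand for cooperative_prefetch<1>(0, src),
// and the Copy_Atom overloads dispatch to a dedicated PREFETCH op when the copy operation
// provides one (for example the SM90 bulk-copy path), falling back to the plain prefetch
// otherwise.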
cutlass/include/cute/algorithm/prefetch.hpp/0
{ "file_path": "cutlass/include/cute/algorithm/prefetch.hpp", "repo_id": "cutlass", "token_count": 2174 }
17
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/mma.hpp> // Config #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) # define CUTE_ARCH_MMA_SM90_ENABLED # define CUTE_ARCH_MMA_F64_SM90_ENABLED #endif //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cute { //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x4 TN struct SM90_16x8x4_F64F64F64F64_TN { using DRegisters = double[4]; using ARegisters = double[2]; using BRegisters = double[1]; using CRegisters = double[4]; CUTE_HOST_DEVICE static void fma(double & d0, double & d1, double & d2, double & d3, double const& a0, double const& a1, double const& b0, double const& c0, double const& c1, double const& c2, double const& c3) { #if defined(CUTE_ARCH_MMA_F64_SM90_ENABLED) asm volatile( "mma.sync.aligned.m16n8k4.row.col.f64.f64.f64.f64" "{%0, %1, %2, %3}," "{%4, %5}," "{%6}," "{%7, %8, %9, %10};\n" : "=d"(d0), "=d"(d1), "=d"(d2), "=d"(d3) : "d"(a0), "d"(a1), "d"(b0), "d"(c0), "d"(c1), "d"(c2), "d"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_16x8x4_F64F64F64F64_TN without CUTE_ARCH_MMA_SM90_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x8 TN struct SM90_16x8x8_F64F64F64F64_TN { using DRegisters = double[4]; using ARegisters = double[4]; using BRegisters = double[2]; using CRegisters = double[4]; CUTE_HOST_DEVICE static void fma(double & d0, double & d1, double & d2, double & d3, double const& a0, double const& a1, double const& a2, double const& a3, double const& b0, double const& b1, double const& c0, double const& c1, double const& c2, 
double const& c3) { #if defined(CUTE_ARCH_MMA_F64_SM90_ENABLED) asm volatile( "mma.sync.aligned.m16n8k8.row.col.f64.f64.f64.f64" "{%0, %1, %2, %3}," "{%4, %5, %6, %7}," "{%8, %9}," "{%10, %11, %12, %13};\n" : "=d"(d0), "=d"(d1), "=d"(d2), "=d"(d3) : "d"(a0), "d"(a1), "d"(a2), "d"(a3), "d"(b0), "d"(b1), "d"(c0), "d"(c1), "d"(c2), "d"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_16x8x8_F64F64F64F64_TN without CUTE_ARCH_MMA_SM90_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM90_16x8x16_F64F64F64F64_TN { using DRegisters = double[4]; using ARegisters = double[8]; using BRegisters = double[4]; using CRegisters = double[4]; CUTE_HOST_DEVICE static void fma(double & d0, double & d1, double & d2, double & d3, double const& a0, double const& a1, double const& a2, double const& a3, double const& a4, double const& a5, double const& a6, double const& a7, double const& b0, double const& b1, double const& b2, double const& b3, double const& c0, double const& c1, double const& c2, double const& c3) { #if defined(CUTE_ARCH_MMA_F64_SM90_ENABLED) asm volatile( "mma.sync.aligned.m16n8k16.row.col.f64.f64.f64.f64" "{%0, %1, %2, %3}," "{%4, %5, %6, %7, %8, %9, %10, %11}," "{%12, %13, %14, %15}," "{%16, %17, %18, %19};\n" : "=d"(d0), "=d"(d1), "=d"(d2), "=d"(d3) : "d"(a0), "d"(a1), "d"(a2), "d"(a3), "d"(a4), "d"(a5), "d"(a6), "d"(a7), "d"(b0), "d"(b1), "d"(b2), "d"(b3), "d"(c0), "d"(c1), "d"(c2), "d"(c3)); #else CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_16x8x16_F64F64F64F64_TN without CUTE_ARCH_MMA_SM90_ENABLED"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x4 TN struct SM90_16x8x4_C64C64C64C64_TN { using DRegisters = complex<double>[4]; using ARegisters = complex<double>[2]; using BRegisters = complex<double>[1]; using CRegisters = complex<double>[4]; CUTE_HOST_DEVICE static void fma(complex<double> & d0, complex<double> & d1, complex<double> & d2, complex<double> & d3, complex<double> const& a0, complex<double> const& a1, complex<double> const& b0, complex<double> const& c0, complex<double> const& c1, complex<double> const& c2, complex<double> const& c3) { // Because thrust::complex does not provide a mutable ref double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0]; double& id0 = reinterpret_cast<double(&)[2]>(d0)[1]; double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0]; double& id1 = reinterpret_cast<double(&)[2]>(d1)[1]; double& rd2 = reinterpret_cast<double(&)[2]>(d2)[0]; double& id2 = reinterpret_cast<double(&)[2]>(d2)[1]; double& rd3 = reinterpret_cast<double(&)[2]>(d3)[0]; double& id3 = reinterpret_cast<double(&)[2]>(d3)[1]; // d.real() = a.real() * b.real() + c.real(); SM90_16x8x4_F64F64F64F64_TN::fma( rd0, rd1, rd2, rd3, a0.real(), a1.real(), b0.real(), c0.real(), c1.real(), c2.real(), c3.real()); // d.imag() = a.imag() * b.real() + c.imag(); SM90_16x8x4_F64F64F64F64_TN::fma( id0, id1, id2, id3, a0.imag(), a1.imag(), b0.real(), c0.imag(), c1.imag(), c2.imag(), c3.imag()); // d.real() = -a.imag() * b.imag() + d.real(); SM90_16x8x4_F64F64F64F64_TN::fma( rd0, rd1, rd2, rd3, -a0.imag(), -a1.imag(), b0.imag(), d0.real(), d1.real(), d2.real(), d3.real()); // d.imag() = a.real() * b.imag() + d.imag(); SM90_16x8x4_F64F64F64F64_TN::fma( id0, id1, id2, id3, a0.real(), a1.real(), b0.imag(), d0.imag(), d1.imag(), d2.imag(), d3.imag()); } }; 
//////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x8 TN struct SM90_16x8x8_C64C64C64C64_TN { using DRegisters = complex<double>[4]; using ARegisters = complex<double>[4]; using BRegisters = complex<double>[2]; using CRegisters = complex<double>[4]; CUTE_HOST_DEVICE static void fma(complex<double> & d0, complex<double> & d1, complex<double> & d2, complex<double> & d3, complex<double> const& a0, complex<double> const& a1, complex<double> const& a2, complex<double> const& a3, complex<double> const& b0, complex<double> const& b1, complex<double> const& c0, complex<double> const& c1, complex<double> const& c2, complex<double> const& c3) { // Because thrust::complex does not provide a mutable ref double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0]; double& id0 = reinterpret_cast<double(&)[2]>(d0)[1]; double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0]; double& id1 = reinterpret_cast<double(&)[2]>(d1)[1]; double& rd2 = reinterpret_cast<double(&)[2]>(d2)[0]; double& id2 = reinterpret_cast<double(&)[2]>(d2)[1]; double& rd3 = reinterpret_cast<double(&)[2]>(d3)[0]; double& id3 = reinterpret_cast<double(&)[2]>(d3)[1]; // d.real() = a.real() * b.real() + c.real(); SM90_16x8x8_F64F64F64F64_TN::fma( rd0, rd1, rd2, rd3, a0.real(), a1.real(), a2.real(), a3.real(), b0.real(), b1.real(), c0.real(), c1.real(), c2.real(), c3.real()); // d.imag() = a.imag() * b.real() + c.imag(); SM90_16x8x8_F64F64F64F64_TN::fma( id0, id1, id2, id3, a0.imag(), a1.imag(), a2.imag(), a3.imag(), b0.real(), b1.real(), c0.imag(), c1.imag(), c2.imag(), c3.imag()); // d.real() = -a.imag() * b.imag() + d.real(); SM90_16x8x8_F64F64F64F64_TN::fma( rd0, rd1, rd2, rd3, -a0.imag(), -a1.imag(), -a2.imag(), -a3.imag(), b0.imag(), b1.imag(), d0.real(), d1.real(), d2.real(), d3.real()); // d.imag() = a.real() * b.imag() + d.imag(); SM90_16x8x8_F64F64F64F64_TN::fma( id0, id1, id2, id3, a0.real(), a1.real(), a2.real(), a3.real(), b0.imag(), b1.imag(), d0.imag(), d1.imag(), d2.imag(), d3.imag()); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // MMA 16x8x16 TN struct SM90_16x8x16_C64C64C64C64_TN { using DRegisters = complex<double>[4]; using ARegisters = complex<double>[8]; using BRegisters = complex<double>[4]; using CRegisters = complex<double>[4]; CUTE_HOST_DEVICE static void fma(complex<double> & d0, complex<double> & d1, complex<double> & d2, complex<double> & d3, complex<double> const& a0, complex<double> const& a1, complex<double> const& a2, complex<double> const& a3, complex<double> const& a4, complex<double> const& a5, complex<double> const& a6, complex<double> const& a7, complex<double> const& b0, complex<double> const& b1, complex<double> const& b2, complex<double> const& b3, complex<double> const& c0, complex<double> const& c1, complex<double> const& c2, complex<double> const& c3) { // Because thrust::complex does not provide a mutable ref double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0]; double& id0 = reinterpret_cast<double(&)[2]>(d0)[1]; double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0]; double& id1 = reinterpret_cast<double(&)[2]>(d1)[1]; double& rd2 = reinterpret_cast<double(&)[2]>(d2)[0]; double& id2 = reinterpret_cast<double(&)[2]>(d2)[1]; double& rd3 = reinterpret_cast<double(&)[2]>(d3)[0]; double& id3 = reinterpret_cast<double(&)[2]>(d3)[1]; // d.real() = a.real() * b.real() + c.real(); SM90_16x8x16_F64F64F64F64_TN::fma( rd0, rd1, rd2, rd3, a0.real(), a1.real(), a2.real(), a3.real(), a4.real(), 
a5.real(), a6.real(), a7.real(), b0.real(), b1.real(), b2.real(), b3.real(), c0.real(), c1.real(), c2.real(), c3.real()); // d.imag() = a.imag() * b.real() + c.imag(); SM90_16x8x16_F64F64F64F64_TN::fma( id0, id1, id2, id3, a0.imag(), a1.imag(), a2.imag(), a3.imag(), a4.imag(), a5.imag(), a6.imag(), a7.imag(), b0.real(), b1.real(), b2.real(), b3.real(), c0.imag(), c1.imag(), c2.imag(), c3.imag()); // d.real() = -a.imag() * b.imag() + d.real(); SM90_16x8x16_F64F64F64F64_TN::fma( rd0, rd1, rd2, rd3, -a0.imag(), -a1.imag(), -a2.imag(), -a3.imag(), -a4.imag(), -a5.imag(), -a6.imag(), -a7.imag(), b0.imag(), b1.imag(), b2.imag(), b3.imag(), d0.real(), d1.real(), d2.real(), d3.real()); // d.imag() = a.real() * b.imag() + d.imag(); SM90_16x8x16_F64F64F64F64_TN::fma( id0, id1, id2, id3, a0.real(), a1.real(), a2.real(), a3.real(), a4.real(), a5.real(), a6.real(), a7.real(), b0.imag(), b1.imag(), b2.imag(), b3.imag(), d0.imag(), d1.imag(), d2.imag(), d3.imag()); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cute //////////////////////////////////////////////////////////////////////////////////////////////////// #include <cute/arch/mma_sm90_desc.hpp> #include <cute/arch/mma_sm90_gmma.hpp> //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cute { namespace GMMA { template < class ElementA, class ElementB, class ElementC, class TileShape_MNK, GMMA::Major MajorA = GMMA::Major::K, GMMA::Major MajorB = GMMA::Major::K, auto... Args // e.g. GMMA::ScaleOut::One, [GMMA::ScaleIn::One, GMMA::ScaleIn::One] // But most commonly leave empty for defaults > CUTE_HOST_DEVICE constexpr auto ss_op_selector() { static_assert(is_static<TileShape_MNK>::value, "TileShape_MNK must be static."); static_assert(rank(TileShape_MNK{}) == 3, "TileShape_MNK must be rank 3."); static_assert(size<0>(TileShape_MNK{}) % 64 == 0, "Tile_M must be a multiple of 64."); auto Tile_N = size<1>(TileShape_MNK{}); // FP16 accumulator if constexpr (is_same_v<ElementC, half_t>) { if constexpr (is_same_v<ElementA, half_t> && is_same_v<ElementB, half_t>) { static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16."); // Dispatch against the Tile N mode size if constexpr (Tile_N % 256 == 0) { return SM90_64x256x16_F16F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x16_F16F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x16_F16F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x16_F16F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x16_F16F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x16_F16F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x16_F16F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x16_F16F16F16_SS<MajorA, MajorB, Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e4m3_t ; Input B: float_e4m3_t else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e4m3_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); 
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F16E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F16E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F16E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F16E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F16E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F16E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F16E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F16E4M3E4M3_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e4m3_t ; Input B: float_e5m2_t else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e5m2_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F16E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F16E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F16E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F16E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F16E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F16E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F16E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F16E4M3E5M2_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e5m2_t ; Input B: float_e5m2_t else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e5m2_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F16E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F16E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F16E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F16E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F16E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F16E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F16E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F16E5M2E5M2_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e5m2_t ; Input B: float_e4m3_t else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e4m3_t>) { static_assert(MajorA == GMMA::Major::K, 
"MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F16E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F16E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F16E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F16E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F16E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F16E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F16E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F16E5M2E4M3_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } else { static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for request configuration."); } } // FP32 accumulator else if constexpr (is_same_v<ElementC, float>) { // FP16 inputs if constexpr (is_same_v<ElementA, half_t>) { static_assert(is_same_v<ElementA, ElementB>, "ElementA and ElementB must be the same type for this config."); static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x16_F32F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x16_F32F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x16_F32F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x16_F32F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x16_F32F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x16_F32F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x16_F32F16F16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x16_F32F16F16_SS<MajorA, MajorB, Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // BF16 inputs else if constexpr (is_same_v<ElementA, bfloat16_t>) { static_assert(is_same_v<ElementA, ElementB>, "ElementA and ElementB must be the same type for this config."); static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } 
} // TF32 inputs else if constexpr (is_same_v<ElementA, tfloat32_t>) { static_assert(is_same_v<ElementA, ElementB>, "ElementA and ElementB must be the same type for this config."); static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 8 == 0, "Tile_K must be a multiple of 8."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x8_F32TF32TF32_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x8_F32TF32TF32_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x8_F32TF32TF32_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x8_F32TF32TF32_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x8_F32TF32TF32_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x8_F32TF32TF32_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x8_F32TF32TF32_SS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x8_F32TF32TF32_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e4m3_t ; Input B: float_e4m3_t else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e4m3_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F32E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F32E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F32E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F32E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F32E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F32E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F32E4M3E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F32E4M3E4M3_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e4m3_t ; Input B: float_e5m2_t else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e5m2_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F32E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F32E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F32E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F32E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F32E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F32E4M3E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F32E4M3E5M2_SS_TN<Args...>{}; } else if 
constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F32E4M3E5M2_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e5m2_t ; Input B: float_e5m2_t else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e5m2_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F32E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F32E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F32E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F32E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F32E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F32E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F32E5M2E5M2_SS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F32E5M2E5M2_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e5m2_t ; Input B: float_e4m3_t else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e4m3_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F32E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F32E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F32E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F32E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F32E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F32E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F32E5M2E4M3_SS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F32E5M2E4M3_SS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } else { static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for request configuration."); } } // S32 accumulator else if constexpr (is_same_v<ElementC, int32_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); // ElementA == int8_t && ElementB == int8_t if constexpr (is_same_v<ElementA, int8_t> && is_same_v<ElementB, int8_t>) { if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_S32S8S8_SS_TN{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_S32S8S8_SS_TN{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_S32S8S8_SS_TN{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_S32S8S8_SS_TN{}; } else if constexpr (Tile_N % 64 == 0) { return 
SM90_64x64x32_S32S8S8_SS_TN{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_S32S8S8_SS_TN{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_S32S8S8_SS_TN{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_S32S8S8_SS_TN{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // ElementA == int8_t && ElementB == uint8_t else if constexpr (is_same_v<ElementA, int8_t> && is_same_v<ElementB, uint8_t>) { static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_S32S8U8_SS_TN{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_S32S8U8_SS_TN{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_S32S8U8_SS_TN{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_S32S8U8_SS_TN{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_S32S8U8_SS_TN{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_S32S8U8_SS_TN{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_S32S8U8_SS_TN{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_S32S8U8_SS_TN{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // ElementA == uint8_t && ElementB == int8_t else if constexpr (is_same_v<ElementA, uint8_t> && is_same_v<ElementB, int8_t>) { static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_S32U8S8_SS_TN{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_S32U8S8_SS_TN{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_S32U8S8_SS_TN{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_S32U8S8_SS_TN{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_S32U8S8_SS_TN{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_S32U8S8_SS_TN{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_S32U8S8_SS_TN{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_S32U8S8_SS_TN{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // ElementA == uint8_t && ElementB == uint8_t else if constexpr (is_same_v<ElementA, uint8_t> && is_same_v<ElementB, uint8_t>) { static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_S32U8U8_SS_TN{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_S32U8U8_SS_TN{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_S32U8U8_SS_TN{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_S32U8U8_SS_TN{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_S32U8U8_SS_TN{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_S32U8U8_SS_TN{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_S32U8U8_SS_TN{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_S32U8U8_SS_TN{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } } // Unknown accumulator type else { static_assert(sizeof(ElementC) == 0, "Unknown ElementC accumulator type."); } } template < class ElementA, class ElementB, class ElementC, class TileShape_MNK, GMMA::Major MajorA = GMMA::Major::K, GMMA::Major MajorB = GMMA::Major::K, auto... Args // e.g. 
GMMA::ScaleOut::One, [GMMA::ScaleIn::One, GMMA::ScaleIn::One] // But most commonly leave empty for defaults > CUTE_HOST_DEVICE constexpr auto rs_op_selector() { static_assert(is_static<TileShape_MNK>::value, "TileShape_MNK must be static."); static_assert(rank(TileShape_MNK{}) == 3, "TileShape_MNK must be rank 3."); static_assert(size<0>(TileShape_MNK{}) % 64 == 0, "Tile_M must be a multiple of 64."); static_assert(MajorA == GMMA::Major::K, "Register source A operand GMMAs must have K-major A layout."); auto Tile_N = size<1>(TileShape_MNK{}); // FP16 accumulator if constexpr (is_same_v<ElementC, half_t>) { static_assert(is_same_v<ElementA, half_t>, "Element types for AB must be half if ElementC is half."); static_assert(is_same_v<ElementB, half_t>, "Element types for AB must be half if ElementC is half."); static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16."); // Dispatch against the Tile N mode size if constexpr (Tile_N % 256 == 0) { return SM90_64x256x16_F16F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x16_F16F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x16_F16F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x16_F16F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x16_F16F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x16_F16F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x16_F16F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x16_F16F16F16_RS<MajorA, MajorB, Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP32 accumulator else if constexpr (is_same_v<ElementC, float>) { // FP16 inputs if constexpr (is_same_v<ElementA, half_t>) { static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16."); static_assert(is_same_v<ElementA, ElementB>, "ElementA and ElementB must be the same type for this config."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x16_F32F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x16_F32F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x16_F32F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x16_F32F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x16_F32F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x16_F32F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x16_F32F16F16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x16_F32F16F16_RS<MajorA, MajorB, Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // BF16 inputs else if constexpr (is_same_v<ElementA, bfloat16_t>) { static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16."); static_assert(is_same_v<ElementA, ElementB>, "ElementA and ElementB must be the same type for this config."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{}; } else 
if constexpr (Tile_N % 128 == 0) { return SM90_64x128x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // TF32 inputs else if constexpr (is_same_v<ElementA, tfloat32_t>) { static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 8 == 0, "Tile_K must be a multiple of 8."); static_assert(is_same_v<ElementA, ElementB>, "ElementA and ElementB must be the same type for this config."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x8_F32TF32TF32_RS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x8_F32TF32TF32_RS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x8_F32TF32TF32_RS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x8_F32TF32TF32_RS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x8_F32TF32TF32_RS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x8_F32TF32TF32_RS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x8_F32TF32TF32_RS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x8_F32TF32TF32_RS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e4m3_t ; Input B: float_e4m3_t else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e4m3_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F32E4M3E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F32E4M3E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F32E4M3E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F32E4M3E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F32E4M3E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F32E4M3E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F32E4M3E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F32E4M3E4M3_RS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e4m3_t ; Input B: float_e5m2_t else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e5m2_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return 
SM90_64x256x32_F32E4M3E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F32E4M3E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F32E4M3E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F32E4M3E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F32E4M3E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F32E4M3E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F32E4M3E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F32E4M3E5M2_RS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e5m2_t ; Input B: float_e5m2_t else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e5m2_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F32E5M2E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F32E5M2E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F32E5M2E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F32E5M2E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F32E5M2E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F32E5M2E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F32E5M2E5M2_RS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F32E5M2E5M2_RS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // FP8 // Input A: float_e5m2_t ; Input B: float_e4m3_t else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e4m3_t>) { static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config."); static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_F32E5M2E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_F32E5M2E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_F32E5M2E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_F32E5M2E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_F32E5M2E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_F32E5M2E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_F32E5M2E4M3_RS_TN<Args...>{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_F32E5M2E4M3_RS_TN<Args...>{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } else { static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for request configuration."); } } // S32 accumulator else if constexpr (is_same_v<ElementC, int32_t>) { static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config."); static_assert(size<2>(TileShape_MNK{}) % 32 == 0, 
"Tile_K must be a multiple of 32."); // ElementA == int8_t && ElementB == int8_t if constexpr (is_same_v<ElementA, int8_t> && is_same_v<ElementB, int8_t>) { if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_S32S8S8_RS_TN{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_S32S8S8_RS_TN{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_S32S8S8_RS_TN{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_S32S8S8_RS_TN{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_S32S8S8_RS_TN{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_S32S8S8_RS_TN{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_S32S8S8_RS_TN{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_S32S8S8_RS_TN{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // ElementA == int8_t && ElementB == uint8_t else if constexpr (is_same_v<ElementA, int8_t> && is_same_v<ElementB, uint8_t>) { static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_S32S8U8_RS_TN{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_S32S8U8_RS_TN{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_S32S8U8_RS_TN{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_S32S8U8_RS_TN{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_S32S8U8_RS_TN{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_S32S8U8_RS_TN{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_S32S8U8_RS_TN{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_S32S8U8_RS_TN{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // ElementA == uint8_t && ElementB == int8_t else if constexpr (is_same_v<ElementA, uint8_t> && is_same_v<ElementB, int8_t>) { static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_S32U8S8_RS_TN{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_S32U8S8_RS_TN{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_S32U8S8_RS_TN{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_S32U8S8_RS_TN{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_S32U8S8_RS_TN{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_S32U8S8_RS_TN{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_S32U8S8_RS_TN{}; } else if constexpr (Tile_N % 8 == 0) { return SM90_64x8x32_S32U8S8_RS_TN{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } // ElementA == uint8_t && ElementB == uint8_t else if constexpr (is_same_v<ElementA, uint8_t> && is_same_v<ElementB, uint8_t>) { static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32."); if constexpr (Tile_N % 256 == 0) { return SM90_64x256x32_S32U8U8_RS_TN{}; } else if constexpr (Tile_N % 192 == 0) { return SM90_64x192x32_S32U8U8_RS_TN{}; } else if constexpr (Tile_N % 128 == 0) { return SM90_64x128x32_S32U8U8_RS_TN{}; } else if constexpr (Tile_N % 96 == 0) { return SM90_64x96x32_S32U8U8_RS_TN{}; } else if constexpr (Tile_N % 64 == 0) { return SM90_64x64x32_S32U8U8_RS_TN{}; } else if constexpr (Tile_N % 32 == 0) { return SM90_64x32x32_S32U8U8_RS_TN{}; } else if constexpr (Tile_N % 16 == 0) { return SM90_64x16x32_S32U8U8_RS_TN{}; } else if constexpr (Tile_N % 8 == 0) { return 
SM90_64x8x32_S32U8U8_RS_TN{}; } else { static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8."); } } } // Unknown accumulator type else { static_assert(sizeof(ElementC) == 0, "Unknown ElementC accumulator type."); } } } // end namespace GMMA } // end namespace cute ////////////////////////////////////////////////////////////////////////////////////////////////////
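// Usage sketch (illustrative, hedged): the branches above form the tail of this
// header's GMMA "RS" op selector, which picks the widest 64xNxK instruction that
// divides the CTA tile's N mode. Assuming the enclosing function is
// cute::GMMA::rs_op_selector<ElementA, ElementB, ElementC, TileShape_MNK,
// MajorA, MajorB>() as in CUTLASS 3.x, a minimal standalone translation unit
// that wraps the selected op in a TiledMMA looks like this:

#include <cute/arch/mma_sm90.hpp>   // GMMA ops and op selectors (this header)
#include <cute/atom/mma_atom.hpp>   // cute::make_tiled_mma / cute::TiledMMA

namespace gmma_selector_sketch {

using ElementA      = cute::bfloat16_t;
using ElementB      = cute::bfloat16_t;
using ElementAcc    = float;
using TileShape_MNK = cute::Shape<cute::_64, cute::_128, cute::_64>;  // (M, N, K) CTA tile

// Tile_N = 128 makes the dispatch above return SM90_64x128x16_F32BF16BF16_RS
using GmmaOp   = decltype(cute::GMMA::rs_op_selector<
                    ElementA, ElementB, ElementAcc, TileShape_MNK>());

// The selected op is then tiled across the warpgroup for use by a mainloop
using TiledMma = decltype(cute::make_tiled_mma(GmmaOp{}));

} // namespace gmma_selector_sketch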
cutlass/include/cute/arch/mma_sm90.hpp/0
{ "file_path": "cutlass/include/cute/arch/mma_sm90.hpp", "repo_id": "cutlass", "token_count": 26365 }
18
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/arch/mma_sm70.hpp> #include <cute/atom/mma_traits.hpp> #include <cute/layout.hpp> namespace cute { namespace { // Logical thread id to thread idx (quadpair) using SM70_QuadPair = Layout<Shape <_4, _2>, Stride<_1,_16>>; // (T8,V4) -> (M8,K4) using SM70_8x4_Row = Layout<Shape <_8,_4>, Stride<_1,_8>>; // (T8,V4) -> (M8,K4) using SM70_8x4_Col = Layout<Shape <Shape <_4,_2>,_4>, Stride<Stride<_8,_4>,_1>>; // (T8,V8) -> (M8,N8) using SM70_8x8_16b = Layout<Shape <_8,_8>, Stride<_1,_8>>; // (T8,V8) -> (M8,N8) using SM70_8x8_32b = Layout<Shape <Shape <_2, _2,_2>,Shape <_2,_2, _2>>, Stride<Stride<_1,_16,_4>,Stride<_8,_2,_32>>>; } /////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM70_8x8x4_F16F16F16F16_TN> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Row; using BLayout = SM70_8x4_Row; using CLayout = SM70_8x8_16b; }; /////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM70_8x8x4_F16F16F16F16_NT> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Col; using BLayout = SM70_8x4_Col; using CLayout = SM70_8x8_16b; }; /////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM70_8x8x4_F16F16F16F16_NN> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; 
using ALayout = SM70_8x4_Col; using BLayout = SM70_8x4_Row; using CLayout = SM70_8x8_16b; }; /////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM70_8x8x4_F16F16F16F16_TT> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Row; using BLayout = SM70_8x4_Col; using CLayout = SM70_8x8_16b; }; /////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM70_8x8x4_F32F16F16F32_TN> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Row; using BLayout = SM70_8x4_Row; using CLayout = SM70_8x8_32b; }; /////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM70_8x8x4_F32F16F16F32_NT> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Col; using BLayout = SM70_8x4_Col; using CLayout = SM70_8x8_32b; }; /////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM70_8x8x4_F32F16F16F32_NN> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Col; using BLayout = SM70_8x4_Row; using CLayout = SM70_8x8_32b; }; /////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM70_8x8x4_F32F16F16F32_TT> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Row; using BLayout = SM70_8x4_Col; using CLayout = SM70_8x8_32b; }; /////////////////////////////////////////////////////////////////////////////// } // namespace cute
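// Usage sketch (illustrative, hedged): the MMA_Traits above let CuTe wrap the
// Volta quadpair HMMA instructions from cute/arch/mma_sm70.hpp in an MMA_Atom
// and tile them into a warp-wide TiledMMA. A minimal standalone sketch; the
// 2x2x1 atom layout (4 quadpairs = 32 threads) is just one possible tiling:

#include <cute/arch/mma_sm70.hpp>        // SM70_8x8x4_* operations
#include <cute/atom/mma_traits_sm70.hpp> // the traits defined above
#include <cute/atom/mma_atom.hpp>        // cute::MMA_Atom, cute::make_tiled_mma

namespace sm70_mma_sketch {

// 8x8x4 quadpair MMA, FP16 inputs, FP32 accumulation (NT operand variant)
using AtomSM70 = cute::MMA_Atom<cute::SM70_8x8x4_F32F16F16F32_NT>;

// Replicate the quadpair atom 2x2x1 across (M, N, K) to cover a full warp
using TiledMma = decltype(cute::make_tiled_mma(
                   AtomSM70{},
                   cute::Layout<cute::Shape<cute::_2, cute::_2, cute::_1>>{}));

} // namespace sm70_mma_sketch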
cutlass/include/cute/atom/mma_traits_sm70.hpp/0
{ "file_path": "cutlass/include/cute/atom/mma_traits_sm70.hpp", "repo_id": "cutlass", "token_count": 2332 }
19
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/layout.hpp> /* This implements a ComposedLayout of the form * LayoutA o Offset o LayoutB * and is useful in cases where composition() does not or cannot apply to LayoutA and LayoutB. * For example, when the "divisibility condition" in shape_div is violated in composition(LayoutA, LayoutB). * * This ComposedLayout provides similar functionality to Layout including tiling, partitioning, * coordinate-to-index mapping and layout manipulations, but is not considered a "normal" layout. * For example, this layout provides shape() and size() functions, but does not provide stride() functions. * Mostly, the similar functionality is accomplished by applying each operation to LayoutB only * as LayoutB defines the domain. 
*/ namespace cute { // A Layout of non-trivially composable functions: F o I o L template <class LayoutA, class Offset, class LayoutB> struct ComposedLayout : private cute::tuple<LayoutA, Offset, LayoutB> // EBO for static layouts { CUTE_HOST_DEVICE constexpr ComposedLayout(LayoutA const& layoutA = {}, Offset const& offset = {}, LayoutB const& layoutB = {}) : cute::tuple<LayoutA, Offset, LayoutB>(layoutA, offset, layoutB) {} // // Accessors // static constexpr int rank = LayoutB::rank; CUTE_HOST_DEVICE constexpr decltype(auto) layout_a() const { return get<0>(static_cast<cute::tuple<LayoutA, Offset, LayoutB> const&>(*this)); } CUTE_HOST_DEVICE constexpr decltype(auto) offset() const { return get<1>(static_cast<cute::tuple<LayoutA, Offset, LayoutB> const&>(*this)); } CUTE_HOST_DEVICE constexpr decltype(auto) layout_b() const { return get<2>(static_cast<cute::tuple<LayoutA, Offset, LayoutB> const&>(*this)); } CUTE_HOST_DEVICE constexpr decltype(auto) layout() const { return *this; } CUTE_HOST_DEVICE constexpr decltype(auto) shape() const { return layout_b().shape(); } // Doesn't really make sense to ask for the strides of this "layout" CUTE_HOST_DEVICE constexpr decltype(auto) stride() const = delete; // // Mappings // // Map a logical coordinate to a linear index (Coord has no Underscore slice operators) // OR // Slice the layout and return the sublayout (Coord has an Underscore slice op) template <class Coord> CUTE_HOST_DEVICE constexpr auto operator()(Coord const& coord) const { if constexpr (has_underscore<Coord>::value) { return slice(coord, *this); } else { return layout_a()(offset() + layout_b()(coord)); // (A o O o B)(c) } CUTE_GCC_UNREACHABLE; } // Convenience function for multi-dimensional coordinates template <class Coord0, class Coord1, class... Coords> CUTE_HOST_DEVICE constexpr auto operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) const { return operator()(make_coord(c0,c1,cs...)); } // // Compose // template <class OtherLayout> CUTE_HOST_DEVICE constexpr auto compose(OtherLayout const& other) const { return composition(*this, other); } template <class... Layouts> CUTE_HOST_DEVICE constexpr auto compose(Layouts const&... layouts) const { return composition(*this, make_tile(layouts...)); } template <class OtherShape> CUTE_HOST_DEVICE constexpr auto with_shape(OtherShape const& shape) const { return composition(*this, make_layout(shape)); } template <class... Shapes> CUTE_HOST_DEVICE constexpr auto with_shape(Shapes const&... shapes) const { return composition(*this, make_layout(make_shape(shapes...))); } // // Tile // template <class OtherLayout> CUTE_HOST_DEVICE constexpr auto tile(OtherLayout const& other) const { return tiled_divide(*this, other); } template <class... Layouts> CUTE_HOST_DEVICE constexpr auto tile(Layouts const&... layouts) const { return tiled_divide(*this, make_tile(layouts...)); } // Equality, return a static or dynamic boolean template <class... 
Args> CUTE_HOST_DEVICE constexpr auto operator==(ComposedLayout<Args...> const& other) const { return this->layout_a() == other.layout_a() && this->layout_b() == other.layout_b() && this->offset() == other.offset(); } }; template <class A, class O, class B> struct is_layout<ComposedLayout<A,O,B>> : true_type {}; template <class T> struct is_composed_layout : false_type {}; template <class A, class O, class B> struct is_composed_layout<ComposedLayout<A,O,B>> : true_type {}; // // Constructors // template <class LayoutA, class Offset, class LayoutB> CUTE_HOST_DEVICE constexpr auto make_composed_layout(LayoutA const& layoutA, Offset const& offset, LayoutB const& layoutB) { return ComposedLayout<LayoutA, Offset, LayoutB>{layoutA, offset, layoutB}; } // // Utilities // // Return the layout of a mode template <int... Is, class A, class O, class B> CUTE_HOST_DEVICE constexpr decltype(auto) layout(ComposedLayout<A,O,B> const& clayout) { return composition(clayout.layout_a(), clayout.offset(), layout<Is...>(clayout.layout_b())); } // Return the shape of a mode template <int... Is, class A, class O, class B> CUTE_HOST_DEVICE constexpr decltype(auto) shape(ComposedLayout<A,O,B> const& layout) { return shape<Is...>(layout.layout_b()); } // Doesn't make sense to directly ask for the strides of this "layout" template <int... Is, class Fn, class O, class Layout> CUTE_HOST_DEVICE constexpr decltype(auto) stride(ComposedLayout<Fn,O,Layout> const& layout) = delete; // Return the number of elements in a mode template <int... Is, class A, class O, class B> CUTE_HOST_DEVICE constexpr decltype(auto) size(ComposedLayout<A,O,B> const& layout) { return size<Is...>(layout.layout_b()); } // Return the number of modes template <int... Is, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto rank(ComposedLayout<A,O,B> const& layout) { return rank<Is...>(layout.layout_b()); } // Return the depth of the layout template <int... Is, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto depth(ComposedLayout<A,O,B> const& layout) { return depth<Is...>(layout.layout_b()); } // Return the codomain size of a mode template <int... 
Is, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto cosize(ComposedLayout<A,O,B> const& layout) { return cosize<Is...>(layout.layout_b()); } // // Operations to manipulate Layouts like a tuple of pairs // template <size_t I, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto get(ComposedLayout<A,O,B> const& a) { return composition(a.layout_a(), a.offset(), get<I>(a.layout_b())); } template <int Begin, int End, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto take(ComposedLayout<A,O,B> const& a) { return composition(a.layout_a(), a.offset(), take<Begin,End>(a.layout_b())); } template <class A, class O, class B> CUTE_HOST_DEVICE constexpr auto flatten(ComposedLayout<A,O,B> const& a) { return composition(a.layout_a(), a.offset(), flatten(a.layout_b())); } template <int N, class A, class O, class B, class X> CUTE_HOST_DEVICE constexpr auto append(ComposedLayout<A,O,B> const& a, X const& x) { return composition(a.layout_a(), a.offset(), append<N>(a.layout_b(), x)); } template <int Begin, int End, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto group(ComposedLayout<A,O,B> const& a) { return composition(a.layout_a(), a.offset(), group<Begin,End>(a.layout_b())); } // // Slice a ComposedLayout // template <class Coord, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto slice_and_offset(Coord const& coord, ComposedLayout<A,O,B> const& layout) { auto [slice, offset] = slice_and_offset(coord, layout.layout_b()); return cute::make_tuple(ComposedLayout{layout.layout_a(), layout.offset() + offset, slice}, Int<0>{}); } template <class Coord, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto slice(Coord const& coord, ComposedLayout<A,O,B> const& layout) { return get<0>(slice_and_offset(coord, layout)); } // Compute a pointer offset and (potentially modified) layout from a coordinate // For composed layout tensors the offset is accumulated in the layout itself while pointer is not updated template <class Coord, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto domain_offset(Coord const& coord, ComposedLayout<A,O,B> const& layout) { return cute::make_tuple(ComposedLayout{layout.layout_a(), layout.offset() + layout.layout_b()(coord), layout.layout_b()}, Int<0>{}); } // // composition // template <class LayoutA, class Offset, class LayoutB> CUTE_HOST_DEVICE constexpr auto composition(LayoutA const& layoutA, Offset const& offset, LayoutB const& layoutB) { return ComposedLayout<LayoutA, Offset, LayoutB>{layoutA, offset, layoutB}; } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto composition(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), composition(a.layout_b(), b)); } template <class ShapeA, class StrideA, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto composition(Layout<ShapeA,StrideA> const& a, ComposedLayout<A,O,B> const& b) { CUTE_STATIC_ASSERT_V(b.offset() == Int<0>{}, "Require offset == 0."); return composition(composition(a, b.layout_a()), b.layout_b()); } // // complement // template <class A, class O, class B, class CoTarget> CUTE_HOST_DEVICE constexpr auto complement(ComposedLayout<A,O,B> const& layout, CoTarget const& cotarget) { return complement(layout.layout_b(), cotarget); } template <class A, class O, class B> CUTE_HOST_DEVICE constexpr auto complement(ComposedLayout<A,O,B> const& layout) { return complement(layout, cosize(layout)); } // // inverse // template <class A, class O, class B> CUTE_HOST_DEVICE constexpr auto 
right_inverse(ComposedLayout<A,O,B> const& layout) { return composition(right_inverse(layout.layout_b()), right_inverse(layout.offset()), right_inverse(layout.layout_a())); } template <class A, class O, class B> CUTE_HOST_DEVICE constexpr auto left_inverse(ComposedLayout<A,O,B> const& layout) { return composition(left_inverse(layout.layout_b()), left_inverse(layout.offset()), left_inverse(layout.layout_a())); } // // Other operations // template <class A, class O, class B> CUTE_HOST_DEVICE constexpr auto zip(ComposedLayout<A,O,B> const& a) { return composition(a.layout_a(), a.offset(), zip(a.layout_b())); } // Partitions template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto logical_divide(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), logical_divide(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto tile_unzip(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), tile_unzip(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto tiled_divide(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), tiled_divide(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto zipped_divide(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), zipped_divide(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto flat_divide(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), flat_divide(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto logical_product(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), logical_product(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto zipped_product(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), zipped_product(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto tiled_product(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), tiled_product(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto flat_product(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), flat_product(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto blocked_product(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), blocked_product(a.layout_b(), b)); } template <class A, class O, class B, class Tiler> CUTE_HOST_DEVICE constexpr auto raked_product(ComposedLayout<A,O,B> const& a, Tiler const& b) { return composition(a.layout_a(), a.offset(), raked_product(a.layout_b(), b)); } template <class A, class O, class B, class Shape, class ModeOrder = GenColMajor> CUTE_HOST_DEVICE constexpr auto tile_to_shape(ComposedLayout<A,O,B> const& layout, Shape const& trg_shape, ModeOrder const& ord_shape = {}) { return composition(layout.layout_a(), layout.offset(), tile_to_shape(layout.layout_b(), trg_shape, ord_shape)); } template <class A, class O, class B, class Shape> CUTE_HOST_DEVICE constexpr auto 
filter(ComposedLayout<A,O,B> const& layout, Shape const& trg_profile) { return composition(layout.layout_a(), layout.offset(), filter(layout.layout_b(), trg_profile)); } template <class A, class O, class B> CUTE_HOST_DEVICE constexpr auto coalesce(ComposedLayout<A,O,B> const& layout) { return composition(layout.layout_a(), layout.offset(), coalesce(layout.layout_b())); } template <class A, class O, class B, class Shape> CUTE_HOST_DEVICE constexpr auto coalesce(ComposedLayout<A,O,B> const& layout, Shape const& trg_profile) { return composition(layout.layout_a(), layout.offset(), coalesce(layout.layout_b(), trg_profile)); } // // Upcast and Downcast // template <int N, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto upcast(ComposedLayout<A,O,B> const& layout) { return composition(upcast<N>(layout.layout_a()), upcast<N>(layout.offset()), upcast<N>(layout.layout_b())); } template <int N, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto downcast(ComposedLayout<A,O,B> const& layout) { return composition(downcast<N>(layout.layout_a()), downcast<N>(layout.offset()), downcast<N>(layout.layout_b())); } template <class OldType, class NewType, class A, class O, class B> CUTE_HOST_DEVICE constexpr auto recast_layout(ComposedLayout<A,O,B> const& layout) { using scale = decltype(trait_ratio(sizeof_bits<NewType>{}, sizeof_bits<OldType>{})); if constexpr (scale::num == 1 && scale::den == 1) { return layout; } else if constexpr (scale::num == 1) { return downcast<scale::den>(layout); } else if constexpr (scale::den == 1) { return upcast<scale::num>(layout); } else { static_assert(dependent_false<scale>, "Recast not supported."); } CUTE_GCC_UNREACHABLE; } // // Display utilities // template <class A, class O, class B> CUTE_HOST_DEVICE void print(ComposedLayout<A,O,B> const& layout) { print(layout.layout_a()); print(" o "); print(layout.offset()); print(" o "); print(layout.layout_b()); } #if !defined(__CUDACC_RTC__) template <class A, class O, class B> CUTE_HOST std::ostream& operator<<(std::ostream& os, ComposedLayout<A,O,B> const& layout) { return os << layout.layout_a() << " o " << layout.offset() << " o " << layout.layout_b(); } #endif } // end namespace cute
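// Usage sketch (illustrative, hedged): a ComposedLayout evaluates coordinates as
// (A o offset o B)(c) = layout_a()(offset() + layout_b()(c)), and forwards
// shape/size queries to layout_b(). A minimal standalone example with ordinary
// integer-strided layouts:

#include <cute/layout.hpp>
#include <cute/layout_composed.hpp>  // this header

inline void composed_layout_sketch() {
  using namespace cute;

  auto layoutA = make_layout(make_shape(Int<32>{}), make_stride(Int<2>{}));  // i -> 2*i
  auto layoutB = make_layout(make_shape(Int<4>{}, Int<4>{}));                // (m,n) -> m + 4*n
  auto offset  = Int<1>{};

  auto composed = make_composed_layout(layoutA, offset, layoutB);

  // Domain comes from layoutB; stride() is deliberately deleted for this type
  CUTE_STATIC_ASSERT_V(size(composed) == Int<16>{});

  // (A o 1 o B)(2,3) = A(1 + B(2,3)) = 2 * (1 + (2 + 4*3)) = 30
  auto idx = composed(make_coord(Int<2>{}, Int<3>{}));
  CUTE_STATIC_ASSERT_V(idx == Int<30>{});
}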
cutlass/include/cute/layout_composed.hpp/0
{ "file_path": "cutlass/include/cute/layout_composed.hpp", "repo_id": "cutlass", "token_count": 6665 }
20
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/layout.hpp> #include <cute/layout_composed.hpp> #include <cute/swizzle.hpp> /* Specialized functionality for a ComposedLayout of the form * InvolutionFn o Offset o LayoutB * where the InvolutionFn is a Swizzle<B,M,S> and is not linear (hence the need for the Offset). * * Because these are specializations for core functions of ComposedLayout, these Swizzle Layouts * provide similar functionality to Layout including tiling, partitioning, * coordinate-to-index mapping and layout manipulations, but are not considered "normal" layouts. * For example, these provide shape() and size() functions, but do not provide stride() functions. * * Furthermore, each of these specializations uses Swizzle<>-specific knowledge in its implementation and * attempts to decay itself to a normal-layout with dynamic or static strides when certain slicing conditions * are met. This is possible by determining the subdomain of the Swizzle<> function that is identity and * testing if LayoutB's codomain is contained within it. In general, MizedBits is used as the Offset to track * statically-vs-dynamically known bits in the Offset to improve the decay to static or dynamic normal layouts. 
*/ namespace cute { // // Constructors // template <int B, int M, int S> CUTE_HOST_DEVICE constexpr auto make_layout(Swizzle<B,M,S> const& sxor) { return composition(sxor, Layout<Int<M+B+abs(S)>,Int<1>>{}); } namespace detail { template <int B, int M, int S, class OldShape, class OldStride, class NewShape, class NewStride> CUTE_HOST_DEVICE constexpr auto transfer_swizzle(Layout<OldShape,OldStride> const& old_layout, Layout<NewShape,NewStride> const& new_layout) { // Our goal is to determine a new swizzle for the strides in new_layout for consistent vectorizations // This is accomplished by identifying // S o L :=: S? o L* // We identify the "active" portion of S by computing (P o L)(c*) where P is a projection generated by S // Then that active identifier is transformed through the layouts: // L*(L[(P o L)(c*)]) // which is a new swizzle identifier for S?, the new swizzle // Projections of the swizzle layout for composition, P auto swizzle_only_zy = make_layout(make_shape (Int<(1 << M)>{}, Int<(1 << B)>{}, Int<(1 << (abs(S)-B))>{}, Int<(1 << B )>{}, Int<1>{}), make_stride( Int<0>{}, Int<(1 << M)>{}, Int<0>{}, Int<(1 << (M+abs(S)))>{}, Int<0>{})); // Compose with the tile to get the swizzle projection, P o L [The Z and Y contributing portions of L] auto layout_only_zy = composition(swizzle_only_zy, old_layout); // Transform the end coordinate to get the active bits of the swizzle, (P o L)(c*) auto swizzle_active_bits = layout_only_zy(size(layout_only_zy)-Int<1>{}); // Get the Z bit and the Y bits -- keep only those that are active in Z *and* Y auto zzz_msk = typename Swizzle<B,M,S>::zzz_msk{}; auto yyy_msk = typename Swizzle<B,M,S>::yyy_msk{}; auto msk_sft = typename Swizzle<B,M,S>::msk_sft{}; auto active_Z = swizzle_active_bits & shiftr(swizzle_active_bits, msk_sft) & zzz_msk; auto active_Y = swizzle_active_bits & shiftr(swizzle_active_bits, -msk_sft) & yyy_msk; // Pass the identifiers through the old layout and new layout to make a new swizzle identifier, L*(L[(P o L)(c*)]) auto new_active_Z = new_layout(old_layout.get_1d_coord(active_Z)); auto new_active_Y = new_layout(old_layout.get_1d_coord(active_Y)); // Use this new swizzle identifier to construct the new swizzle for new_layout // (this also makes sure it's a "valid" swizzle that Swizzle can represent) return composition(make_swizzle<new_active_Y,new_active_Z>(), new_layout); } } // end namespace detail template <int B, int M, int S, class Offset, class Layout> CUTE_HOST_DEVICE constexpr auto make_fragment_like(ComposedLayout<Swizzle<B,M,S>,Offset,Layout> const& layout) { return detail::transfer_swizzle<B,M,S>(layout.layout_b(), make_fragment_like(layout.layout_b())); } // // Utilities // namespace detail { // Get just the Swizzle part of a composed layout. template <int B, int M, int S, class Offset, class LayoutB> CUTE_HOST_DEVICE constexpr auto get_swizzle_portion(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB>) { return Swizzle<B,M,S>{}; } // A non-swizzled layout's "Swizzle part" is the identity swizzle. template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto get_swizzle_portion(Layout<Shape,Stride>) { return Swizzle<0,4,3>{}; } // Get the "non-swizzle" part of a composed layout, // which is the underlying (non-composed) Layout. template <int B, int M, int S, class Offset, class LayoutB> CUTE_HOST_DEVICE constexpr auto get_nonswizzle_portion(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& slayout) { return slayout.layout_b(); } // The non-swizzle part of a non-swizzled layout is just the Layout. 
template <class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto get_nonswizzle_portion(Layout<Shape,Stride> const& slayout) { return slayout; } } // namespace detail // // Slice a Swizzled ComposedLayout // namespace detail { template <class IntZ, class IntY, class Offset, int... I> CUTE_HOST_DEVICE constexpr auto make_swizzle_strides(true_type, IntZ const& Z, IntY const& Y, Offset const& offset, int_sequence<I...>) { // Below is an optimized/compressed version of: //return cute::make_tuple((swizzle(offset + Z*Int<(1 << I)>{}) - swizzle(offset))...); // with knowledge of Swizzle, I... ranges for each B bits, // and the layout won't slice along z-bits that are already set // y\z 0 1 // 0 Z DC // 1 -Z DC return cute::make_tuple(conditional_return((offset & (Y << Int<I>{})) == Int<0>{}, Z << Int<I>{}, -(Z << Int<I>{}))...); } template <class IntZ, class IntY, class Offset, int... I> CUTE_HOST_DEVICE constexpr auto make_swizzle_strides(false_type, IntZ const& Z, IntY const& Y, Offset const& offset, int_sequence<I...>) { // Below is an optimized/compressed version of: //return cute::make_tuple((swizzle(offset + Y*Int<(1 << I)>{}) - swizzle(offset))...); // with knowledge of Swizzle, I... ranges for each B bits, // and the layout won't slice along y-bits that are already set // y\z 0 1 // 0 Y+Z Y-Z // 1 DC DC return cute::make_tuple(conditional_return((offset & (Z << Int<I>{})) == Int<0>{}, (Y+Z) << Int<I>{}, (Y-Z) << Int<I>{})...); } } // end namespace detail template <class Coord, int B, int M, int S, class Offset, class Layout> CUTE_HOST_DEVICE constexpr auto slice_and_offset(Coord const& coord, ComposedLayout<Swizzle<B,M,S>,Offset,Layout> const& layout) { if constexpr (all_underscore<Coord>::value) { // Skip the expensive/complicated attempt to decay to a normal layout and just reshape return cute::make_tuple(composition(layout.layout_a(), layout.offset(), slice(coord, layout.layout_b())), Int<0>{}); } else { // Projections of the swizzle layout for composition auto sw = make_layout(make_shape(Int<(1 << M)>{}, Int<(1 << B)>{}, Int<(1 << (abs(S)-B))>{}, Int<(1 << B)>{}, Int<1>{})); auto swizzle_anti_zy = make_layout(shape(sw), make_stride(stride<0>(sw), Int<0>{}, stride<2>(sw), Int<0>{}, size(sw))); auto swizzle_only_zy = make_layout(shape(sw), make_stride( Int<0>{}, stride<1>(sw), Int<0>{}, stride<3>(sw), Int<0>{})); // The portion of the layout that is not yet consumed auto sliced_layout = slice(coord, layout.layout_b()); // If the sliced_layout hits two bits that are swizzled together, then don't attempt to decay // Compose with the layout to get the swizzle projection, P o L [The Z and Y contributing portions of L] // (this also tests that shape/stride of layout compose with swizzle) auto sliced_layout_only_zy = composition(swizzle_only_zy, sliced_layout); // Transform the end coordinate to get the active bits of the swizzle, (P o L)(c*) auto swizzle_active_bits = sliced_layout_only_zy(size(sliced_layout_only_zy)-Int<1>{}); // Determine if any active bits collide under the swizzle auto hit_ZandY = !(swizzle_active_bits & ~layout.layout_a()(swizzle_active_bits)); // The portion of the layout that we are consuming now auto diced_layout = dice(coord, layout.layout_b()); auto diced_coord = dice(coord, coord); auto diced_layout_anti_zy = composition(swizzle_anti_zy, diced_layout); auto diced_layout_only_zy = composition(swizzle_only_zy, diced_layout); // New swizzle and offset auto swizzle = layout.layout_a(); // offset_only_zy interacts with swizzle and gets accumulated with 
layout.offset() // being careful about the static/dynamic contributions from diced_layout and diced_coord auto offset_only_zy = layout.offset() ^ to_mixed_bits(diced_layout_only_zy, diced_coord); // offset_anti_zy always gets passed through, no interaction with swizzle auto offset_anti_zy = diced_layout_anti_zy(diced_coord); // If Layout's codomain hits on Y AND Z, then it's not reducible // If Layout's codomain hits on Y XOR Z, then it's dynamic-normal // If Layout's codomain hits on neither Y NOR Z, then it's static-normal // Test the sliced layout for hit_X & hit_Y for potential decay if constexpr (is_constant<false, decltype(hit_ZandY)>::value) { // Hits on Y AND Z, so it's not reducible return cute::make_tuple(composition(swizzle, offset_only_zy, sliced_layout), offset_anti_zy); } else { // Misses on Y or Z, so it's static-normal or dynamic-normal // Lowest bit of the Z and Y masks auto Z = typename Swizzle<B,M,S>::zzz_msk{} & -typename Swizzle<B,M,S>::zzz_msk{}; auto Y = typename Swizzle<B,M,S>::yyy_msk{} & -typename Swizzle<B,M,S>::yyy_msk{}; auto stride_lo = detail::make_swizzle_strides(Z < Y, Z, Y, offset_only_zy, make_int_sequence<B>{}); auto stride_hi = detail::make_swizzle_strides(Z > Y, Z, Y, offset_only_zy, make_int_sequence<B>{}); // Construct a (dynamic) layout that we can perform the composition with auto swizzle_layout = make_layout(make_shape (Int<(1 << M)>{}, repeat<B>(Int<2>{}), Int<(1 << (abs(S)-B))>{}, repeat<B>(Int<2>{}), Int< 1>{}), make_stride(Int< 1>{}, stride_lo, Int<(1 << (M+B))>{}, stride_hi , Int<(1 << (M+B+abs(S)))>{})); // Decay to a normal layout with offset return cute::make_tuple(composition(swizzle_layout, sliced_layout), swizzle(offset_only_zy) + offset_anti_zy); } } CUTE_GCC_UNREACHABLE; } // // composition // // Ignore identity case template <int M, int S, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto composition(Swizzle<0,M,S> const&, Int<0> const&, Layout<Shape,Stride> const& layout) { return layout; } template <int B, int M, int S, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto composition(Swizzle<B,M,S> const& sxor, Layout<Shape,Stride> const& layout) { return composition(sxor, Int<0>{}, layout); } template <class ShapeA, class StrideA, int B, int M, int S> CUTE_HOST_DEVICE constexpr auto composition(Layout<ShapeA,StrideA> const& a, Swizzle<B,M,S> const& b) { // Get the Z bits and the Y bits auto active_Y = a(typename Swizzle<B,M,S>::yyy_msk{}); auto active_Z = a(typename Swizzle<B,M,S>::zzz_msk{}); // Works in simple cases... but could be greatly generalized return composition(make_swizzle<active_Y,active_Z>(), a); } // // inverse // // Specialization to attempt to pass-through the Swizzle back to the left -- Needed? template <int B, int M, int S, class Offset, class Layout> CUTE_HOST_DEVICE constexpr auto right_inverse(ComposedLayout<Swizzle<B,M,S>,Offset,Layout> const& layout) { if constexpr (is_constant<0, Offset>::value) { return composition(right_inverse(layout.layout_b()), layout.layout_a()); } else { return composition(right_inverse(layout.layout_b()), right_inverse(layout.offset()), right_inverse(layout.layout_a())); } } // Specialization to attempt to pass-through the Swizzle back to the left -- Needed? 
template <int B, int M, int S, class Offset, class Layout> CUTE_HOST_DEVICE constexpr auto left_inverse(ComposedLayout<Swizzle<B,M,S>,Offset,Layout> const& layout) { if constexpr (is_constant<0, Offset>::value) { return composition(left_inverse(layout.layout_b()), layout.layout_a()); } else { return composition(left_inverse(layout.layout_b()), left_inverse(layout.offset()), left_inverse(layout.layout_a())); } } template <int B, int M, int S> CUTE_HOST_DEVICE constexpr Swizzle<B,M,S> right_inverse(Swizzle<B,M,S> const& sw) { return sw; } template <int B, int M, int S> CUTE_HOST_DEVICE constexpr Swizzle<B,M,S> left_inverse(Swizzle<B,M,S> const& sw) { return sw; } // Kludge -- Probably want an OffsetFn<T> here instead template <class T, __CUTE_REQUIRES(is_integral<T>::value)> CUTE_HOST_DEVICE constexpr auto right_inverse(T const& t) { return -t; } // Kludge -- Probably want an OffsetFn<T> here instead template <class T, __CUTE_REQUIRES(is_integral<T>::value)> CUTE_HOST_DEVICE constexpr auto left_inverse(T const& t) { return -t; } // // Upcast and Downcast // template <int N, int B, int M, int S> CUTE_HOST_DEVICE constexpr auto upcast(Swizzle<B,M,S> const& swizzle) { static_assert(has_single_bit(N), "N must be a power of two"); constexpr int log2_n = bit_width(uint32_t(N)) - 1; constexpr int NewM = M - log2_n; if constexpr (NewM >= 0) { return Swizzle<B,NewM,S>{}; } else { return Swizzle<cute::max(B+NewM,0), 0, S>{}; } CUTE_GCC_UNREACHABLE; } template <int N, int B, int M, int S> CUTE_HOST_DEVICE constexpr auto downcast(Swizzle<B,M,S> const& swizzle) { static_assert(has_single_bit(N), "N must be a power of two"); constexpr int log2_n = bit_width(uint32_t(N)) - 1; return Swizzle<B,(M + log2_n),S>{}; } template <class OldType, class NewType, int B, int M, int S> CUTE_HOST_DEVICE constexpr auto recast_layout(Swizzle<B,M,S> const& swizzle) { using scale = decltype(trait_ratio(sizeof_bits<NewType>{}, sizeof_bits<OldType>{})); if constexpr (scale::num == 1 && scale::den == 1) { return swizzle; } else if constexpr (scale::num == 1) { return downcast<scale::den>(swizzle); } else if constexpr (scale::den == 1) { return upcast<scale::num>(swizzle); } else { static_assert(dependent_false<scale>, "Recast not supported."); } CUTE_GCC_UNREACHABLE; } // // Other operations // template <int B, int M, int S, class Offset, class LayoutB, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto max_common_layout(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& a, Layout<Shape,Stride> const& b) { auto common = max_common_layout(a.layout_b(), b); auto base = Int<(1 << M)>{}; if constexpr (base < size(common)) { return common.compose(base); // Truncate common to size base } else { return common; } } template <class Shape, class Stride, int B, int M, int S, class Offset, class LayoutB> CUTE_HOST_DEVICE constexpr auto max_common_layout(Layout<Shape,Stride> const& a, ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& b) { return max_common_layout(b, a); } template <int B, int M, int S, class Offset, class LayoutB, class Shape, class Stride> CUTE_HOST_DEVICE constexpr auto max_common_vector(ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& a, Layout<Shape,Stride> const& b) { // This assumes that Offset is in the YZ domain of the Swizzle... 
return cute::min(Int<(1 << M)>{}, max_common_vector(a.layout_b(), b)); } template <class Shape, class Stride, int B, int M, int S, class Offset, class LayoutB> CUTE_HOST_DEVICE constexpr auto max_common_vector(Layout<Shape,Stride> const& a, ComposedLayout<Swizzle<B,M,S>,Offset,LayoutB> const& b) { return max_common_vector(b, a); } template <int B0, int M0, int S0, class Offset0, class LayoutB0, int B1, int M1, int S1, class Offset1, class LayoutB1> CUTE_HOST_DEVICE constexpr auto max_common_vector(ComposedLayout<Swizzle<B0,M0,S0>,Offset0,LayoutB0> const& a, ComposedLayout<Swizzle<B1,M1,S1>,Offset1,LayoutB1> const& b) { auto result = coalesce(composition(a, right_inverse(b))); if constexpr (is_constant<1, decltype(stride<0>(result.layout_b()))>::value) { return shape<0>(result); } else { return Int<1>{}; } CUTE_GCC_UNREACHABLE; } /////////////////////////////////////////////////////////////////////////////// // ComposedLayout as second argument is often more difficult... template <class Shape, class Stride, int B, int M, int S, class Offset, class LayoutT> CUTE_HOST_DEVICE constexpr auto logical_product(Layout<Shape,Stride> const& layout, ComposedLayout<Swizzle<B,M,S>,Offset,LayoutT> const& tiler) { CUTE_STATIC_ASSERT_V(tiler.offset() == Int<0>{}, "Require Swizzle offset == 0."); // The new layout -- if swizzle wasn't an issue, this is the result // our goal is to determine a new swizzle for these strides auto new_layout = logical_product(layout, tiler.layout_b()); // This is accomplished by identifying // S o L :=: S? o L* // We identify the "active" portion of S by computing (P o L)(c*) where P is a projection generated by S // Then that active identifier is transformed through the layouts: // L*(L[(P o L)(c*)]) // which is a new swizzle identifier for S?, the new swizzle // Projections of the swizzle layout for composition, P auto swizzle_only_zy = make_layout(make_shape (Int<(1 << M)>{}, Int<(1 << B)>{}, Int<(1 << (abs(S)-B))>{}, Int<(1 << B )>{}, Int<1>{}), make_stride( Int<0>{}, Int<(1 << M)>{}, Int<0>{}, Int<(1 << (M+abs(S)))>{}, Int<0>{})); // Compose with the tiler to get the swizzle projection, P o L [The Z and Y contributing portions of L] auto layout_only_zy = composition(swizzle_only_zy, tiler.layout_b()); // Transform the end coordinate to get the active bits of the swizzle, (P o L)(c*) auto swizzle_active_bits = layout_only_zy(size(layout_only_zy)-Int<1>{}); // Get the Z bit and the Y bits auto active_Z = swizzle_active_bits & typename Swizzle<B,M,S>::zzz_msk{}; auto active_Y = swizzle_active_bits & typename Swizzle<B,M,S>::yyy_msk{}; // Pass the identifiers through the old layout and new layout to make a new swizzle identifier, L*(L[(P o L)(c*)]) auto new_active_Z = new_layout(Int<0>{}, tiler.layout_b()[active_Z]); auto new_active_Y = new_layout(Int<0>{}, tiler.layout_b()[active_Y]); // Use this new swizzle identifier to construxt the new swizzle for new_layout // (this also makes sure it's a "valid" swizzle that Swizzle can represent) return composition(make_swizzle<new_active_Y,new_active_Z>(), new_layout); } } // end namespace cute
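// Usage sketch (illustrative, hedged): composing a Swizzle functor with an
// ordinary layout produces the swizzled ComposedLayouts this header
// specializes. A minimal standalone example; Swizzle<3,3,3> over an 8x64
// row-major tile is just one representative choice:

#include <cute/swizzle_layout.hpp>  // this header (pulls in layout + swizzle)

inline void swizzle_layout_sketch() {
  using namespace cute;

  // Swizzle<3,3,3> o 0 o (8,64):(64,1)
  auto swizzled_atom = composition(Swizzle<3,3,3>{},
                                   Layout<Shape<_8,_64>, Stride<_64,_1>>{});

  // shape()/size() still report the inner (non-swizzled) layout's domain
  CUTE_STATIC_ASSERT_V(size(swizzled_atom) == Int<512>{});

  // Tiling utilities forward to the inner layout, e.g. to grow the swizzled
  // atom into a larger shared-memory layout
  [[maybe_unused]] auto smem_layout = tile_to_shape(swizzled_atom, Shape<_128,_64>{});
}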
cutlass/include/cute/swizzle_layout.hpp/0
{ "file_path": "cutlass/include/cute/swizzle_layout.hpp", "repo_id": "cutlass", "token_count": 8160 }
21
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Matrix multiply */ #pragma once #include <cuda_fp16.h> #include "cutlass/arch/mma.h" #include "cutlass/layout/matrix.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template <typename LayoutA, typename LayoutB, typename LayoutC> struct Mma< gemm::GemmShape<2,1,1>, 1, half_t, LayoutA, half_t, LayoutB, half_t, LayoutC, OpMultiplyAdd> { using Shape = gemm::GemmShape<2, 1, 1>; using Operator = OpMultiplyAdd; using ElementC = half_t; CUTLASS_HOST_DEVICE void operator()( Array<half_t, 2> &d, Array<half_t, 2> const &a, Array<half_t, 1> const &b, Array<half_t, 2> const &c ) { #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)) __half2 const & A = reinterpret_cast<__half2 const &>(a); __half2 B = __half2half2(reinterpret_cast<__half const &>(b)); __half2 const & C = reinterpret_cast<__half2 const &>(c); __half2 D = __hfma2(A, B, C); d = reinterpret_cast<Array<half_t, 2> &>(D); #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { d[i] = a[i] * b[0] + c[i]; } #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template <typename LayoutA, typename LayoutB> struct Mma< gemm::GemmShape<1,2,1>, 1, half_t, LayoutA, half_t, LayoutB, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<1, 2, 1>; using Operator = OpMultiplyAdd; using ElementC = half_t; CUTLASS_HOST_DEVICE void operator()( Array<half_t, 2> &d, Array<half_t, 1> const &a, Array<half_t, 2> const &b, Array<half_t, 2> const &c ) { #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)) __half2 const & A = __half2half2(reinterpret_cast<__half const &>(a)); __half2 B = reinterpret_cast<__half2 const &>(b); __half2 const & C = reinterpret_cast<__half2 const &>(c); __half2 D = __hfma2(A, B, C); d = reinterpret_cast<Array<half_t, 2> &>(D); #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { d[i] = a[0] * b[i] + c[i]; } #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template <> struct Mma < gemm::GemmShape<2, 2, 1>, 1, half_t, layout::ColumnMajor, half_t, layout::RowMajor, half_t, layout::ColumnMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<2, 2, 1>; using Operator = OpMultiplyAdd; using ElementC = half_t; CUTLASS_HOST_DEVICE void operator()( Array<half_t, 4> &d, Array<half_t, 2> const &a, Array<half_t, 2> const &b, Array<half_t, 4> const &c ) { #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)) __half2 const & A = reinterpret_cast<__half2 const &>(a); __half2 Blo = __low2half2(reinterpret_cast<__half2 const &>(b)); __half2 Bhi = __high2half2(reinterpret_cast<__half2 const &>(b)); __half2 const *C = reinterpret_cast<__half2 const *>(&c); __half2 Dlo = __hfma2(A, Blo, C[0]); __half2 Dhi = __hfma2(A, Bhi, C[1]); Array<half_t, 2> * D = reinterpret_cast<Array<half_t, 2> *>(&d); D[0] = reinterpret_cast<Array<half_t, 2> const &>(Dlo); D[1] = reinterpret_cast<Array<half_t, 2> const &>(Dhi); #else CUTLASS_PRAGMA_UNROLL for (int j = 0; j < 2; ++j) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { d[i + 2 * j] = a[i] * b[j] + c[i + 2 * j]; } } #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation template <> 
struct Mma< gemm::GemmShape<2, 2, 1>, 1, half_t, layout::ColumnMajor, half_t, layout::RowMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<2, 2, 1>; using Operator = OpMultiplyAdd; using ElementC = half_t; CUTLASS_HOST_DEVICE void operator()( Array<half_t, 4> &d, Array<half_t, 2> const &a, Array<half_t, 2> const &b, Array<half_t, 4> const &c ) { #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)) __half2 Alo = __low2half2(reinterpret_cast<__half2 const &>(a)); __half2 Ahi = __high2half2(reinterpret_cast<__half2 const &>(a)); __half2 const & B = reinterpret_cast<__half2 const &>(b); __half2 const *C = reinterpret_cast<__half2 const *>(&c); __half2 Dlo = __hfma2(Alo, B, C[0]); __half2 Dhi = __hfma2(Ahi, B, C[1]); Array<half_t, 2> * D = reinterpret_cast<Array<half_t, 2> *>(&d); D[0] = reinterpret_cast<Array<half_t, 2> &>(Dlo); D[1] = reinterpret_cast<Array<half_t, 2> &>(Dhi); #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { CUTLASS_PRAGMA_UNROLL for (int j = 0; j < 2; ++j) { d[i * 2 + j] = a[i] * b[j] + c[i * 2 + j]; } } #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } }
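// Usage sketch (illustrative, hedged): these thread-level Mma specializations
// are CUTLASS_HOST_DEVICE, so the scalar fallback path also runs on the host
// (or on pre-SM60 devices). A minimal standalone 2x2x1 example using the
// column-major-C specialization defined above:

#include "cutlass/arch/mma_sm60.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"

inline void mma_sm60_sketch() {
  using Mma = cutlass::arch::Mma<
      cutlass::gemm::GemmShape<2, 2, 1>, 1,
      cutlass::half_t, cutlass::layout::ColumnMajor,
      cutlass::half_t, cutlass::layout::RowMajor,
      cutlass::half_t, cutlass::layout::ColumnMajor,
      cutlass::arch::OpMultiplyAdd>;

  cutlass::Array<cutlass::half_t, 2> a;  // 2x1 column of A
  cutlass::Array<cutlass::half_t, 2> b;  // 1x2 row of B
  cutlass::Array<cutlass::half_t, 4> c;  // 2x2 accumulator, column-major
  cutlass::Array<cutlass::half_t, 4> d;

  a[0] = cutlass::half_t(1.0f); a[1] = cutlass::half_t(2.0f);
  b[0] = cutlass::half_t(3.0f); b[1] = cutlass::half_t(4.0f);
  for (int i = 0; i < 4; ++i) { c[i] = cutlass::half_t(0.0f); }

  Mma mma;
  mma(d, a, b, c);  // d[i + 2*j] = a[i] * b[j] + c[i + 2*j]
}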
cutlass/include/cutlass/arch/mma_sm60.h/0
{ "file_path": "cutlass/include/cutlass/arch/mma_sm60.h", "repo_id": "cutlass", "token_count": 2675 }
22
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Matrix multiply */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/layout/matrix.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { //////////////////////////////////////////////////////////////////////////////// // // WMMA template structure defines nvcuda::wmma::fragments and static assert for // wmma native instruction sizes supported for cutlass::int4b_t (experimental::s4). 
// //////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename LayoutA_, typename LayoutB_, typename LayoutC_> struct Wmma< Shape_, ///< Size of the matrix product (concept: GemmShape) cutlass::int4b_t, ///< ElementA LayoutA_, ///< LayoutA cutlass::int4b_t, ///< ElementB LayoutB_, ///< LayoutB int32_t, ///< ElementC LayoutC_, ///< LayoutC cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc) > { #if defined(CUTLASS_ARCH_WMMA_SM75_ENABLED) using Shape = Shape_; using ElementA = cutlass::int4b_t; using LayoutA = LayoutA_; using ElementB = cutlass::int4b_t; using LayoutB = LayoutB_; using ElementC = int32_t; using LayoutC = LayoutC_; using Operator = cutlass::arch::OpMultiplyAdd; using ArchTag = arch::Sm75; // check supported wmma shape for the given multiplicand data types static_assert( platform::is_same<cutlass::gemm::GemmShape<8, 8, 32>, Shape>::value, "Supported list of wmma operator shape for s4 multiplicands is: 8x8x32"); // Wmma Fragment using FragmentA = nvcuda::wmma::fragment< nvcuda::wmma::matrix_a, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementA>::Type, typename CutlassToWmmaLayout<LayoutA>::Layout>; using FragmentB = nvcuda::wmma::fragment< nvcuda::wmma::matrix_b, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementB>::Type, typename CutlassToWmmaLayout<LayoutB>::Layout>; using FragmentC = nvcuda::wmma::fragment< nvcuda::wmma::accumulator, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementC>::Type>; /// Performs a nvcuda::wmma matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &D, FragmentA const &A, FragmentB const &B, FragmentC const &C) const { nvcuda::wmma::mma_sync(D, A, B, C); } #else static_assert(false, "wmma.mma.sync with integer multiplicands is available only for SM75 and beyond"); #endif }; //////////////////////////////////////////////////////////////////////////////// // // WMMA template structure defines nvcuda::wmma::fragments and static assert for // wmma native instruction sizes supported for cutlass::uint1b_t (experimental::b1).
// //////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename LayoutA_, typename LayoutB_, typename LayoutC_> struct Wmma< Shape_, ///< Size of the matrix product (concept: GemmShape) cutlass::uint1b_t, ///< ElementA LayoutA_, ///< LayoutA cutlass::uint1b_t, ///< ElementB LayoutB_, ///< LayoutB int32_t, ///< ElementC LayoutC_, ///< LayoutC cutlass::arch::OpXorPopc ///< Operator (multiply-add, xor.popc) > { #if defined(CUTLASS_ARCH_WMMA_SM75_ENABLED) using Shape = Shape_; using ElementA = cutlass::uint1b_t; using LayoutA = LayoutA_; using ElementB = cutlass::uint1b_t; using LayoutB = LayoutB_; using ElementC = int32_t; using LayoutC = LayoutC_; using Operator = cutlass::arch::OpXorPopc; using ArchTag = arch::Sm75; // check supported wmma shape for the given multiplicand data types static_assert( platform::is_same<cutlass::gemm::GemmShape<8, 8, 128>, Shape>::value, "Supported list of wmma operator shape for b1 multiplicands is: 8x8x128"); // Wmma Fragment using FragmentA = nvcuda::wmma::fragment< nvcuda::wmma::matrix_a, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementA>::Type, typename CutlassToWmmaLayout<LayoutA>::Layout>; using FragmentB = nvcuda::wmma::fragment< nvcuda::wmma::matrix_b, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementB>::Type, typename CutlassToWmmaLayout<LayoutB>::Layout>; using FragmentC = nvcuda::wmma::fragment< nvcuda::wmma::accumulator, Shape::kM, Shape::kN, Shape::kK, typename CutlassToWmmaDataType<ElementC>::Type>; /// Performs an nvcuda::wmma binary matrix multiply-accumulate (XOR, POPC) operation CUTLASS_DEVICE void operator()( FragmentC &D, FragmentA const &A, FragmentB const &B, FragmentC const &C) const { nvcuda::wmma::bmma_sync(D, A, B, C, nvcuda::wmma::experimental::bmmaBitOpXOR, nvcuda::wmma::experimental::bmmaAccumulateOpPOPC); } #else static_assert(false, "wmma.mma.sync integer type multiplicands are available only for SM75 and beyond"); #endif }; } // namespace arch } // namespace cutlass
cutlass/include/cutlass/arch/wmma_sm75.h/0
{ "file_path": "cutlass/include/cutlass/arch/wmma_sm75.h", "repo_id": "cutlass", "token_count": 3043 }
23
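The SM75 sub-byte WMMA specializations above are pure type plumbing plus a thin operator() over nvcuda::wmma. The compile-time sketch below shows how such a specialization resolves; it is illustrative only, assumes a CUTLASS include path and an SM75+ build in which CUTLASS_ARCH_WMMA_SM75_ENABLED is defined, and the alias name WmmaS4 is hypothetical rather than part of CUTLASS.

#include <type_traits>
#include "cutlass/arch/mma.h"        // OpMultiplyAdd
#include "cutlass/arch/wmma.h"       // pulls in the SM75 sub-byte specializations above
#include "cutlass/gemm/gemm.h"       // GemmShape
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"   // int4b_t

// Hypothetical alias: the s4 specialization instantiated at its only supported shape.
using WmmaS4 = cutlass::arch::Wmma<
    cutlass::gemm::GemmShape<8, 8, 32>,
    cutlass::int4b_t, cutlass::layout::RowMajor,     // A
    cutlass::int4b_t, cutlass::layout::ColumnMajor,  // B
    int32_t,          cutlass::layout::RowMajor,     // C
    cutlass::arch::OpMultiplyAdd>;

#if defined(CUTLASS_ARCH_WMMA_SM75_ENABLED)
// The specialization tags itself as SM75 and exposes nvcuda::wmma fragment types.
static_assert(std::is_same<WmmaS4::ArchTag, cutlass::arch::Sm75>::value,
              "s4 WMMA first appears on SM75");
#endif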
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/conv/convnd_problem_shape.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::conv::collective::detail { ///////////////////////////////////////////////////////////////////////////////////////////////// // Construct the stride types for conv collectives based on the dispatch policy, strides 64b by default template <class DispatchPolicy> constexpr auto sm90_dispatch_policy_to_stride_A() { if constexpr (DispatchPolicy::ConvOp == conv::Operator::kFprop) { // Maps to modes ((w,n), C) if constexpr (DispatchPolicy::NumSpatialDimensions == 1) { return cute::Stride<cute::Stride<int64_t, int64_t>, cute::Int<1>>{}; } // Maps to modes ((w,h,n), C) else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) { return cute::Stride<cute::Stride<int64_t, int64_t, int64_t>, cute::Int<1>>{}; } // Maps to modes ((w,h,d,n), C) else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) { return cute::Stride<cute::Stride<int64_t, int64_t, int64_t, int64_t>, cute::Int<1>>{}; } // error dims assert else { static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count."); } } else if constexpr (DispatchPolicy::ConvOp == conv::Operator::kWgrad) { // Maps to modes (k, nq/npq/nzpq) if constexpr (DispatchPolicy::NumSpatialDimensions == 1 || DispatchPolicy::NumSpatialDimensions == 2 || DispatchPolicy::NumSpatialDimensions == 3) { return cute::Stride<cute::Int<1>, int64_t>{}; } // error dims assert else { static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count."); } } else if constexpr (DispatchPolicy::ConvOp == conv::Operator::kDgrad) { // Maps to modes ((q,n), K) if constexpr 
(DispatchPolicy::NumSpatialDimensions == 1) { return cute::Stride<cute::Stride<int64_t, int64_t>, cute::Int<1>>{}; } // Maps to modes ((q,p,n), K) else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) { return cute::Stride<cute::Stride<int64_t, int64_t, int64_t>, cute::Int<1>>{}; } // Maps to modes ((q,p,z,n), K) else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) { return cute::Stride<cute::Stride<int64_t, int64_t, int64_t, int64_t>, cute::Int<1>>{}; } // error dims assert else { static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count."); } } else { static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported ConvOp."); } } // Construct the stirde types for conv collectives based on the dispatch policy, strides 64b by default template <class DispatchPolicy> constexpr auto sm90_dispatch_policy_to_stride_B() { if constexpr (DispatchPolicy::ConvOp == conv::Operator::kFprop) { // Maps to modes (k, (C,s)) if constexpr (DispatchPolicy::NumSpatialDimensions == 1) { return cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t>>{}; } // Maps to modes (k, (C,s,r)) else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) { return cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t>>{}; } // Maps to modes (k, (C,s,r,t)) else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) { return cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>>{}; } // error dims assert else { static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count."); } } else if constexpr (DispatchPolicy::ConvOp == conv::Operator::kWgrad) { // Maps to modes (C, (w,n)) if constexpr (DispatchPolicy::NumSpatialDimensions == 1) { return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t>>{}; } // Maps to modes (C, (w,h,n)) else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) { return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t, int64_t>>{}; } // Maps to modes (C, (w,h,d,n)) else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) { return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t, int64_t, int64_t>>{}; } // error dims assert else { static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count."); } } else if constexpr (DispatchPolicy::ConvOp == conv::Operator::kDgrad) { // Maps to modes (C, (k,s)) if constexpr (DispatchPolicy::NumSpatialDimensions == 1) { return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t>>{}; } // Maps to modes (C, (k,s,r)) else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) { return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t, int64_t>>{}; } // Maps to modes (C, (k,s,r,t)) else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) { return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t, int64_t, int64_t>>{}; } // error dims assert else { static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count."); } } else { static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported ConvOp."); } } ///////////////////////////////////////////////////////////////////////////////////////////////// // Compute the lower/near corner, returning it as a cute::array in [W,H,D] order template <conv::Operator ConvOp, int NumSpatialDimensions> CUTLASS_HOST_DEVICE constexpr auto compute_lower_corner_whd(ConvProblemShape<ConvOp, NumSpatialDimensions> const& problem_shape) { using cute::for_each; using 
cute::make_seq; cute::array<int, NumSpatialDimensions> lower{}; if constexpr (ConvOp == conv::Operator::kFprop || ConvOp == conv::Operator::kWgrad) { for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) { lower[NumSpatialDimensions-1-i] = -1 * problem_shape.lower_padding[i]; }); } else if constexpr (ConvOp == conv::Operator::kDgrad) { for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) { lower[NumSpatialDimensions-1-i] = problem_shape.lower_padding[i] - (problem_shape.shape_B[i+1] - 1) * problem_shape.dilation[i]; }); } return lower; } // Computes the upper/far corner, returning it as a cute::array in [W,H,D] order template <conv::Operator ConvOp, int NumSpatialDimensions> CUTLASS_HOST_DEVICE constexpr auto compute_upper_corner_whd(ConvProblemShape<ConvOp, NumSpatialDimensions> const& problem_shape) { using cute::for_each; using cute::make_seq; cute::array<int, NumSpatialDimensions> upper{}; if constexpr (ConvOp == conv::Operator::kFprop) { for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) { upper[NumSpatialDimensions-1-i] = problem_shape.upper_padding[i] - (problem_shape.shape_B[i+1] - 1) * problem_shape.dilation[i]; }); } else if constexpr (ConvOp == conv::Operator::kWgrad) { for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) { upper[NumSpatialDimensions-1-i] = problem_shape.upper_padding[i] - (problem_shape.shape_C[i+1] - 1) * problem_shape.dilation[i]; }); } else if constexpr (ConvOp == conv::Operator::kDgrad) { for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) { upper[NumSpatialDimensions-1-i] = problem_shape.lower_padding[i] - (problem_shape.shape_B[i+1] - 1) * problem_shape.dilation[i] + problem_shape.shape_C[i+1] - problem_shape.shape_A[i+1]; }); } return upper; } // Compute the lower/near corner of (t,r,s), returning it as a cute::array in [S,R,T] order template <conv::Operator ConvOp, int NumSpatialDimensions> CUTLASS_HOST_DEVICE constexpr auto compute_lower_srt(ConvProblemShape<ConvOp, NumSpatialDimensions> const& problem_shape) { using cute::for_each; using cute::make_seq; cute::array<int, NumSpatialDimensions> lower{}; if constexpr (ConvOp == conv::Operator::kFprop || ConvOp == conv::Operator::kWgrad) { for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) { lower[NumSpatialDimensions-1-i] = 0; }); } else if constexpr (ConvOp == conv::Operator::kDgrad) { for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) { lower[NumSpatialDimensions-1-i] = (problem_shape.shape_B[i+1] - 1) * problem_shape.dilation[i]; }); } return lower; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::conv::collective::detail
cutlass/include/cutlass/conv/collective/detail.hpp/0
{ "file_path": "cutlass/include/cutlass/conv/collective/detail.hpp", "repo_id": "cutlass", "token_count": 4033 }
24
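The stride helpers in the record above dispatch on only two members of the policy type: ConvOp and NumSpatialDimensions. The sketch below uses a made-up MockFpropPolicy2D (not a real CUTLASS dispatch policy) to show the stride type produced for operand A of a 2-D fprop; it assumes the include path given in the record metadata.

#include <cstdint>
#include <type_traits>
#include "cutlass/conv/collective/detail.hpp"

// Hypothetical stand-in: just enough of a dispatch policy for the helper to dispatch on.
struct MockFpropPolicy2D {
  static constexpr cutlass::conv::Operator ConvOp = cutlass::conv::Operator::kFprop;
  static constexpr int NumSpatialDimensions = 2;
};

// For 2-D fprop, operand A maps to modes ((w,h,n), C): three dynamic 64-bit strides for
// the spatial/batch modes and a compile-time unit stride for the contiguous channels.
using StrideA = decltype(
    cutlass::conv::collective::detail::sm90_dispatch_policy_to_stride_A<MockFpropPolicy2D>());

static_assert(std::is_same<
    StrideA,
    cute::Stride<cute::Stride<int64_t, int64_t, int64_t>, cute::Int<1>>>::value,
    "2-D fprop A stride: ((w,h,n) dynamic, C unit)");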
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multi-staged Depthwise Convolution kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/semaphore.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/epilogue/threadblock/output_iterator_parameter.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure template <typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename Arguments_, ///! Kernel Arguments typename ConvOutputIteratorParameter_, ///! Output Iterator Params typename ConvProblemSize_ = Conv2dProblemSize, ///! Convolutional operator on 2D or 3D problem conv::GroupMode GroupMode_ = conv::GroupMode::kNone, ///! Group mode typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > ///! 
OutputShape per ThreadBlock struct DirectConvolutionParams { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; static Operator const kConvolutionalOperator = ConvOperator; using ConvProblemSize = ConvProblemSize_; using Arguments = Arguments_; using ConvOutputIteratorParameter = ConvOutputIteratorParameter_; using ThreadblockShape = typename Mma::Shape; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; static conv::GroupMode const kGroupMode = GroupMode_; static int const kStages = Mma::kStages; ConvProblemSize problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; gemm::GemmCoord implicit_gemm_problem_size; int swizzle_log_tile; int smem_size_; int gemm_k_iterations; int gemm_k_iterations_per_channel; typename Mma::IteratorA::Params iterator_A; typename Mma::IteratorA::Element const *ptr_A; typename Mma::IteratorB::Params iterator_B; typename Mma::IteratorB::Element const *ptr_B; typename Mma::IteratorB::Element *ptr_reordered_B; typename Epilogue::OutputTileIterator::Params iterator_C; typename Epilogue::OutputTileIterator::Element *ptr_C; typename Epilogue::OutputTileIterator::Params iterator_D; typename Epilogue::OutputTileIterator::Element *ptr_D; typename EpilogueOutputOp::Params output_op; int *semaphore; SplitKMode split_k_mode; int split_k_slices; // // Methods // CUTLASS_HOST_DEVICE DirectConvolutionParams() : swizzle_log_tile(0), gemm_k_iterations(0) {} /// CUTLASS_HOST_DEVICE DirectConvolutionParams(Arguments const &args, int *semaphore = nullptr) : problem_size(args.problem_size), implicit_gemm_problem_size( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)), iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())), ptr_A(args.ref_A.data()), iterator_B(Mma::IteratorB::getParams(args.problem_size, args.ref_B.layout())), ptr_B(args.ref_B.data()), ptr_reordered_B(args.ref_reordered_B.data()), iterator_C(ConvOutputIteratorParameter::layout(args.ref_C), args.problem_size), ptr_C(args.ref_C.data()), iterator_D(ConvOutputIteratorParameter::layout(args.ref_D), args.problem_size), ptr_D(args.ref_D.data()), output_op(args.output_op), semaphore(semaphore), split_k_mode(args.split_k_mode), split_k_slices(args.problem_size.split_k_slices) { gemm_k_iterations = depthwise_gemm_k_iterations<ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size, kIteratorAlgorithm, kGroupMode, ThreadblockShape::kN); gemm_k_iterations_per_channel = implicit_gemm_k_iterations_per_channel( kConvolutionalOperator, args.problem_size, kIteratorAlgorithm); ThreadblockSwizzle threadblock_swizzle; grid_tiled_shape = threadblock_swizzle.get_tiled_shape( kConvolutionalOperator, problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape); // Dynamic SMEM usage because stride and dilation are runtime params. 
smem_size_ = (max(iterator_A.activation_size, int(sizeof(typename Epilogue::SharedStorage))) * kStages + iterator_B.filter_size); } CUTLASS_HOST_DEVICE int get_smem_size() { // Dynamic Smem Size return smem_size_; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Params_, typename ElementB_> struct ReorderKernel { using Params = Params_; using ElementB = ElementB_; union SharedStorage {}; static unsigned int const kReorderKernelThreadPerCTA = 128; CUTLASS_HOST_DEVICE ReorderKernel() {} CUTLASS_HOST_DEVICE static dim3 get_grid_shape(Params const &params) { return dim3{static_cast<unsigned int>( (params.problem_size.filter_size() + kReorderKernelThreadPerCTA - 1) / kReorderKernelThreadPerCTA), 1, 1}; } CUTLASS_HOST_DEVICE static dim3 get_block_shape() { return dim3{kReorderKernelThreadPerCTA, 1, 1}; } CUTLASS_HOST_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { int64_t m = static_cast<int64_t>(params.problem_size.groups); int64_t n = static_cast<int64_t>(params.problem_size.filter_size() / params.problem_size.K); const ElementB *src_with_type = static_cast<const ElementB *>(params.ptr_B); ElementB *dst_with_type = static_cast<ElementB *>(params.ptr_reordered_B); int64_t linear_index = blockIdx.x * kReorderKernelThreadPerCTA + threadIdx.x; int64_t index_m = linear_index / n; int64_t index_n = linear_index % n; int64_t new_linear_index = index_m + index_n * m; if (linear_index < m * n) { dst_with_type[new_linear_index] = src_with_type[linear_index]; } return; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ = Conv2dProblemSize, ///! Convolutional operator on 2D or 3D problem conv::GroupMode GroupMode_ = conv::GroupMode::kNone, ///! 
Group mode typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > struct DirectConvolution { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; static Operator const kConvolutionalOperator = ConvOperator; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename EpilogueOutputOp::ElementOutput; /// Set output tensor C layout using LayoutC = LayoutA; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using WarpMmaOperator = typename Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename ArchMmaOperator::Operator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename WarpMmaOperator::Shape; using InstructionShape = typename cutlass::gemm::GemmShape<1, 1, 1>; static int const kStages = Mma::kStages; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using TensorRefA = typename Mma::IteratorA::TensorRef; using TensorRefB = typename Mma::IteratorB::TensorRef; using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>; /// Check iterator A and B convolution dimension are the same and // set device::ImplicitGemmConvolution::kConvDim static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim, "Convolution on different different dimensions is not supported"); static int const kConvDim = Mma::IteratorA::kConvDim; /// Conv dimension and problem size structure (Conv2d or Conv3d) using ConvProblemSize = ConvProblemSize_; static conv::GroupMode const kGroupMode = GroupMode_; // // // using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter< LayoutC, typename Epilogue::OutputTileIterator::Layout, TensorRefC, ConvOperator, ConvProblemSize >; /// Argument structure struct Arguments { // // Data members // ConvProblemSize problem_size; TensorRefA ref_A; TensorRefB ref_B; TensorRefB ref_reordered_B; TensorRefC ref_C; TensorRefC ref_D; typename EpilogueOutputOp::Params output_op; SplitKMode split_k_mode; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments() { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size, TensorRefA const & ref_A, TensorRefB const & ref_B, TensorRefC const & ref_C, TensorRefC const & ref_D, typename EpilogueOutputOp::Params const & output_op, TensorRefB const & ref_reordered_B = nullptr, SplitKMode const & split_k_mode = SplitKMode::kSerial ): problem_size(problem_size), ref_A(ref_A), ref_B(ref_B), ref_C(ref_C), ref_D(ref_D), output_op(output_op), ref_reordered_B(ref_reordered_B), split_k_mode(split_k_mode) { } }; using Params = typename cutlass::conv::kernel::DirectConvolutionParams<Mma, Epilogue, ThreadblockSwizzle, kConvolutionalOperator, Arguments, 
ConvOutputIteratorParameter, ConvProblemSize, kGroupMode, ThreadBlockOutputShape>; using ReorderKernel = typename cutlass::conv::kernel::ReorderKernel<Params, ElementB>; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE DirectConvolution() { } /// Executes one ImplicitGEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if threadblock is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() || params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) { return; } // Compute position within threadblock int thread_idx = threadIdx.x; int iterator_column_offset = 0; int filter_row_offset = 0; if (kGroupMode != GroupMode::kNone) { if (kGroupMode == GroupMode::kDepthwise) { iterator_column_offset += threadblock_tile_idx.n() * Mma::Shape::kN; } } // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.iterator_A, params.problem_size, params.ptr_A, thread_idx, MatrixCoord( threadblock_tile_idx.m() + threadblock_tile_idx.k(), iterator_column_offset ) ); typename Mma::IteratorB iterator_B( params.iterator_B, params.problem_size, params.ptr_reordered_B, thread_idx, MatrixCoord( filter_row_offset, iterator_column_offset ) ); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // Compute logical position within grid threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); MatrixCoord threadblock_offset( threadblock_tile_idx.m() + threadblock_tile_idx.k(), threadblock_tile_idx.n() * Mma::Shape::kN ); // Tile iterator writing to destination tensor typename Epilogue::OutputTileIterator iterator_D( params.iterator_D, params.ptr_D, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Tile iterator reading from source accumulator tensor typename Epilogue::OutputTileIterator iterator_C( params.iterator_C, params.ptr_C, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Compute threadblock-scoped matrix multiply-add // Epilogue is fused in the mainloop mma(params.gemm_k_iterations, accumulators, iterator_A, params.iterator_A, iterator_B, params.iterator_B, accumulators, epilogue, output_op, iterator_D, iterator_C, params.split_k_slices); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/kernel/direct_convolution.h/0
{ "file_path": "cutlass/include/cutlass/conv/kernel/direct_convolution.h", "repo_id": "cutlass", "token_count": 7146 }
25
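ReorderKernel in the record above is a flat index permutation: the filter is read as an m x n row-major array (m = groups, n = filter elements per group) and written back transposed so the depthwise mainloop can stream it with unit stride in the group dimension. The host-side sketch below reproduces that index math for reference; the function name and vector-based interface are illustrative and not part of CUTLASS.

#include <cstdint>
#include <vector>

// Reference permutation equivalent to ReorderKernel::operator():
// dst[index_m + index_n * m] = src[index_m * n + index_n].
template <typename ElementB>
void reorder_filter_reference(std::vector<ElementB> const& src,
                              std::vector<ElementB>& dst,
                              int64_t m,   // problem_size.groups
                              int64_t n) { // problem_size.filter_size() / K
  for (int64_t linear = 0; linear < m * n; ++linear) {
    int64_t index_m = linear / n;
    int64_t index_n = linear % n;
    dst[index_m + index_n * m] = src[linear];
  }
}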
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile) matrix from memory. This iterator assumes TensorNHWC or TensorCxRSKx<Interleave> layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/threadblock/conv2d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename Layout_, typename ThreadMap_, typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>, conv::GroupMode GroupMode_ = conv::GroupMode::kNone, bool IsDeconv_ = false > class Conv2dFpropFilterTileAccessIteratorAnalytic { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = Layout_; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static bool const IsDeconv = IsDeconv_; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 2; using ConvProblemSize = typename conv::Conv2dProblemSize; static conv::GroupMode const kGroupMode = GroupMode_; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); // // Simplifying assertions // static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1"); // // Parameters structure // using Params = Conv2dAnalyticParams<Layout>; private: Params const &params_; Conv2dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; LongIndex iteration_vector_; char const *pointer_; int filter_r_; int filter_s_; int filter_c_; int filter_c_init_; int crs_cnt_; int crs_per_group_; int group_idx_offset_c_; int channels_per_group_; int offset_k_[ThreadMap::Iterations::kStrided]; int group_idx_offset_k_[ThreadMap::Iterations::kStrided]; public: CUTLASS_HOST_DEVICE Conv2dFpropFilterTileAccessIteratorAnalytic( Params const &params, Conv2dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), crs_cnt_(0), group_idx_offset_c_(0), filter_r_(0), filter_s_(0), filter_c_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_c_ = threadblock_offset.row() + thread_coord.contiguous(); auto input_channels = (IsDeconv ? problem_size_.K : problem_size_.C); auto output_channels = (IsDeconv ? 
problem_size_.C : problem_size_.K); if (kGroupMode != conv::GroupMode::kNone) { filter_c_init_ = filter_c_; if (kGroupMode == conv::GroupMode::kDepthwise){ channels_per_group_ = 1; crs_per_group_ = problem_size_.S * problem_size_.R; } else { channels_per_group_ = input_channels / problem_size_.groups; crs_per_group_ = problem_size_.S * problem_size_.R * ((channels_per_group_ + Shape::kRow - 1) / Shape::kRow); } } CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; if (kGroupMode != conv::GroupMode::kNone && kGroupMode != conv::GroupMode::kDepthwise) { group_idx_offset_k_[s] = (thread_coord.strided() + s * ThreadMap::Delta::kStrided) / (output_channels / problem_size_.groups); } } set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * 8 / sizeof_bits<Element>::value; } CUTLASS_HOST_DEVICE void advance() { // moves to the next tile if (kGroupMode != conv::GroupMode::kNone) { ++crs_cnt_; } ++filter_s_; if (filter_s_ < problem_size_.S) { return; } filter_s_ = 0; ++filter_r_; if (filter_r_ < problem_size_.R) { return; } filter_r_ = 0; if (kGroupMode == conv::GroupMode::kNone) { filter_c_ += Shape::kRow * problem_size_.split_k_slices; } else { if (crs_cnt_ == crs_per_group_) { crs_cnt_ = 0; filter_c_ = filter_c_init_; if (kGroupMode != conv::GroupMode::kDepthwise) { // moves to next group ++group_idx_offset_c_; } } else { filter_c_ += Shape::kRow * problem_size_.split_k_slices; } } } /// Returns the coordinate in the filter tensor W that is currently pointed to /// by the iterator. CUTLASS_HOST_DEVICE TensorCoord at() const { int k = offset_k_[iteration_strided_]; int c = filter_c_ + iteration_vector_ * AccessType::kElements; return TensorCoord(k, filter_r_, filter_s_, c); } /// Returns true if the current coordinate is within the activations tensor W CUTLASS_HOST_DEVICE bool valid() const { TensorCoord coord = at(); auto input_channels = (IsDeconv ? problem_size_.K : problem_size_.C); auto output_channels = (IsDeconv ? problem_size_.C : problem_size_.K); if (kGroupMode == conv::GroupMode::kNone) { return coord.n() < output_channels && coord.c() < input_channels; } else if (kGroupMode == conv::GroupMode::kDepthwise) { return coord.n() < output_channels && coord.c() < 1; // channels_per_group_ is always equal to ONE. 
} else { return coord.n() < output_channels && coord.c() < channels_per_group_ && group_idx_offset_c_ == group_idx_offset_k_[iteration_strided_]; } } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { TensorCoord coord = at(); LongIndex offset = params_.layout(coord); return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv2dFpropFilterTileAccessIteratorAnalytic &operator++() { ++iteration_vector_; if (iteration_vector_ < kAccessesPerVector) { return *this; } iteration_vector_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(Conv2dProblemSize const &problem_size) { auto input_channels = (IsDeconv ? problem_size.K : problem_size.C); auto output_channels = (IsDeconv ? problem_size.C : problem_size.K); // check alignment constraint on iterator's contiguous dimension if ((input_channels / problem_size.groups) % AccessType::kElements) { return Status::kErrorInvalidProblem; } if (platform::is_same<Layout, layout::TensorCxRSKx<32>>::value) { if (output_channels % 32) { return Status::kErrorInvalidProblem; } } if (platform::is_same<Layout, layout::TensorCxRSKx<64>>::value) { if (output_channels % 64) { return Status::kErrorInvalidProblem; } } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h", "repo_id": "cutlass", "token_count": 3862 }
26
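In the non-grouped path of the iterator above, advance() walks the filter window with s varying fastest, then r, and only after a full R x S window does the channel offset jump by one tile (Shape::kRow) times the number of split-K slices. The loop nest below is a plain-C++ sketch of that traversal order with illustrative parameter names; it is not CUTLASS code.

#include <cstdio>

// Sketch of the kGroupMode == kNone traversal order produced by repeated advance() calls.
void print_fprop_filter_walk(int R, int S, int C, int tile_channels /* Shape::kRow */,
                             int split_k_slices) {
  for (int c = 0; c < C; c += tile_channels * split_k_slices) {
    for (int r = 0; r < R; ++r) {
      for (int s = 0; s < S; ++s) {
        // at() would yield TensorCoord(k, r, s, c + vector offset) for this thread's k values;
        // valid() additionally checks k against the output channels and c against C.
        std::printf("r=%d s=%d c=%d\n", r, s, c);
      }
    }
  }
}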
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile) matrix from memory. This iterator assumes TensorNDHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/conv/threadblock/conv3d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_, bool IsDeconv_ = false > class Conv3dFpropFilterTileAccessIteratorAnalytic { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNDHWC; using ThreadMap = ThreadMap_; using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static bool const IsDeconv = IsDeconv_; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 3; using ConvProblemSize = typename conv::Conv3dProblemSize; static int const kAccessesPerVector = 1; // // Simplifying assertions // static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1"); // // Parameters structure // using Params = Conv3dAnalyticParams<Layout>; private: Params const &params_; ConvProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; char const *pointer_; int filter_t_; int filter_r_; int filter_s_; int filter_c_; int offset_k_[ThreadMap::Iterations::kStrided]; public: CUTLASS_HOST_DEVICE Conv3dFpropFilterTileAccessIteratorAnalytic( Params const &params, ConvProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), filter_t_(0), filter_r_(0), filter_s_(0), filter_c_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_c_ = threadblock_offset.row() + thread_coord.contiguous(); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; } set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * 8 / sizeof_bits<Element>::value; } CUTLASS_HOST_DEVICE void advance() { // moves to the next tile ++filter_s_; if (filter_s_ < problem_size_.S) { return; } filter_s_ = 0; ++filter_r_; if (filter_r_ < problem_size_.R) { return; } filter_r_ = 0; ++filter_t_; if (filter_t_ < problem_size_.T) { return; } filter_t_ = 0; filter_c_ += Shape::kRow * problem_size_.split_k_slices; } /// Returns 
the coordinate in the filter tensor W that is currently pointed to /// by the iterator. CUTLASS_HOST_DEVICE TensorCoord at() const { int k = offset_k_[iteration_strided_]; return TensorCoord(k, filter_t_, filter_r_, filter_s_, filter_c_); } /// Returns true if the current coordinate is within the activations tensor W CUTLASS_HOST_DEVICE bool valid() const { TensorCoord coord = at(); auto input_channels = (IsDeconv ? problem_size_.K : problem_size_.C); auto output_channels = (IsDeconv ? problem_size_.C : problem_size_.K); return coord.n() < output_channels && coord.c() < input_channels; } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { TensorCoord coord = at(); LongIndex offset = params_.layout(coord); return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv3dFpropFilterTileAccessIteratorAnalytic &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(ConvProblemSize const &problem_size) { auto input_channels = (IsDeconv ? problem_size.K : problem_size.C); auto output_channels = (IsDeconv ? problem_size.C : problem_size.K); // check alignment constraint on iterator's contiguous dimension if (input_channels % (128/sizeof_bits<Element>::value)) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h", "repo_id": "cutlass", "token_count": 2671 }
27
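can_implement() in the record above rejects any problem whose input channel count does not fill whole 128-bit accesses of the element type. The constexpr sketch below restates that rule for a few representative element widths, on the assumption that it mirrors the check input_channels % (128 / sizeof_bits<Element>::value).

// Elements per 128-bit access: the input channel count C must be a multiple of this.
constexpr int min_channel_alignment(int element_bits) {
  return 128 / element_bits;
}

static_assert(min_channel_alignment(16) == 8,  "half_t / bfloat16_t: C % 8 == 0");
static_assert(min_channel_alignment(32) == 4,  "float / tf32: C % 4 == 0");
static_assert(min_channel_alignment(8)  == 16, "int8_t: C % 16 == 0");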
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/cache_operation.h" #include "cutlass/gemm/threadblock/mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class ImplicitGemmMultistage : public gemm::threadblock::MmaBase<Shape_, Policy_, Stages> { public: ///< Base class using Base = gemm::threadblock::MmaBase<Shape_, Policy_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Policy describing tuning details using Policy = Policy_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // // Dependent types // /// Fragment of accumulator tile using ElementC = typename Policy::Operator::ElementC; using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Internal structure exposed for introspection. struct Detail { /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; // Optional staged-accumulation (e.g., tf32x3 kernels) for improved numerical // accuracy, where each mainloop iteration first accumulates into a temporary // set of freshly-cleared accumulators, which are subsequently added to the // final accumulator set. 
static bool const kStagedAccumulation = arch::detail::UseStagedAccumulation<Operator>::value; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE ImplicitGemmMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } CUTLASS_DEVICE void copy_tiles_and_advance( IteratorA &iterator_A, IteratorB &iterator_B, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); ++iterator_A; } ++this->smem_iterator_A_; } } iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; 
++v) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B.get(), iterator_B.valid()); ++iterator_B; } ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< initial value of accumulator FragmentC const &src_accum, ///< number of iterations per channel int gemm_k_iterations_per_channel = 0, ///< Imaginary strides used for planar-complex only - ignored here int64_t imag_stride_A = 0, int64_t imag_stride_B = 0) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); ++iterator_A; } ++this->smem_iterator_A_; } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B.get(), iterator_B.valid()); ++iterator_B; } ++this->smem_iterator_B_; } // Move to the next stage iterator_A.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. 
cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance(iterator_A, iterator_B); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); // tf32x3 kernels use staging accumulation. warp_mma uses a temporary // accumulator and this temporary accumulator is added to the final // accumulator once in every mainloop iteration. plus<FragmentC> plus_accum; FragmentC tmp_accum; if (Detail::kStagedAccumulation) { tmp_accum.clear(); } // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % 2]); // Issue global->shared copies for the next stage int group_start_iteration_A, group_start_iteration_B; if (warp_mma_k + 1 == Base::kWarpGemmIterations) { group_start_iteration_A = 0; group_start_iteration_B = 0; } else { group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; } copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B); if (Detail::kStagedAccumulation) { warp_mma( tmp_accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], tmp_accum ); if (warp_mma_k == 0) { accum = plus_accum(accum, tmp_accum); tmp_accum.clear(); } } else { warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], accum ); } if (warp_mma_k + 1 == Base::kWarpGemmIterations) warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); if (warp_mma_k + 2 == Base::kWarpGemmIterations) { // Inserts a fence to group cp.async instructions into stages. 
cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; } } } if (Detail::kStagedAccumulation) { accum = plus_accum(accum, tmp_accum); } // Insert fence and wait for all outstanding cp.async operations to commit. cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
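The constructor above maps the linear warp_idx onto (m, n, k) warp coordinates within the threadblock tile before offsetting the warp-level iterators. The following standalone host-side sketch (not part of the header, using hypothetical warp counts) reproduces only that index arithmetic so the mapping is easy to inspect.

#include <cstdio>

int main() {
  // Hypothetical warp counts per threadblock dimension, chosen only for illustration.
  constexpr int kWarpCountM = 2;
  constexpr int kWarpCountN = 2;
  constexpr int kWarpCountK = 1;

  for (int warp_idx = 0; warp_idx < kWarpCountM * kWarpCountN * kWarpCountK; ++warp_idx) {
    // Same arithmetic as the ImplicitGemmMultistage constructor: split the linear id
    // into an (mn, k) pair, then split mn into (m, n).
    int warp_idx_mn = warp_idx % (kWarpCountM * kWarpCountN);
    int warp_idx_k  = warp_idx / (kWarpCountM * kWarpCountN);
    int warp_idx_m  = warp_idx_mn % kWarpCountM;
    int warp_idx_n  = warp_idx_mn / kWarpCountM;
    std::printf("warp %d -> (m=%d, n=%d, k=%d)\n", warp_idx, warp_idx_m, warp_idx_n, warp_idx_k);
  }
  return 0;
}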
cutlass/include/cutlass/conv/threadblock/implicit_gemm_multistage.h
{ "file_path": "cutlass/include/cutlass/conv/threadblock/implicit_gemm_multistage.h", "repo_id": "cutlass", "token_count": 8208 }
28
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/numeric_types.h" #include "cute/layout.hpp" #include "cute/util/type_traits.hpp" #include "cute/arch/copy_sm90_tma.hpp" //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::detail { //////////////////////////////////////////////////////////////////////////////////////////////////// // For each cutlass::layout, provides its corresponding cute stride types, 64b by default template <class L> struct TagToStrideA { using type = L; }; // Maps to modes [M, K, L] template <> struct TagToStrideA<layout::RowMajor> { using type = cute::Stride<int64_t, cute::Int<1>, int64_t>; using tag = layout::RowMajor; }; // Maps to modes [M, K, L] template <> struct TagToStrideA<layout::ColumnMajor> { using type = cute::Stride<cute::Int<1>, int64_t, int64_t>; using tag = layout::ColumnMajor; }; template <class L> struct TagToStrideB { using type = L; }; // Maps to modes [N, K, L] template <> struct TagToStrideB<layout::RowMajor> { using type = cute::Stride<cute::Int<1>, int64_t, int64_t>; using tag = layout::RowMajor; }; // Maps to modes [N, K, L] template <> struct TagToStrideB<layout::ColumnMajor> { using type = cute::Stride<int64_t, cute::Int<1>, int64_t>; using tag = layout::ColumnMajor; }; // For each cutlass::layout *, provides its corresponding cute stride types, 64b by default // Used by pointer array and grouped gemm // Maps to modes [M, K, L] template <> struct TagToStrideA<layout::RowMajor *> { using UnderlyingType = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>; using type = UnderlyingType*; using tag = layout::RowMajor; }; // Maps to modes [M, K, L] template <> struct 
TagToStrideA<layout::ColumnMajor *> { using UnderlyingType = cute::Stride<cute::Int<1>, int64_t, cute::Int<0>>; using type = UnderlyingType*; using tag = layout::ColumnMajor; }; // Maps to modes [N, K, L] template <> struct TagToStrideB<layout::RowMajor *> { using UnderlyingType = cute::Stride<cute::Int<1>, int64_t, cute::Int<0>>; using type = UnderlyingType*; using tag = layout::RowMajor; }; // Maps to modes [N, K, L] template <> struct TagToStrideB<layout::ColumnMajor *> { using UnderlyingType = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>; using type = UnderlyingType*; using tag = layout::ColumnMajor; }; // Maps to modes [M, N, L] template <class LayoutTag> struct TagToStrideC : TagToStrideA<LayoutTag> { }; // Conv: Maps to modes ((P,N), C, _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorNWC> { using type = cute::Stride<cute::Stride<int64_t, int64_t>, cute::Int<1>, cute::Int<0>>; }; // Conv: Maps to modes (PN, C, _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorLinearizedNWC> { using type = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>; }; // Conv: Maps to modes ((P,Q,N), C, _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorNHWC> { using type = cute::Stride<cute::Stride<int64_t, int64_t, int64_t>, cute::Int<1>, cute::Int<0>>; }; // Conv: Maps to modes (PQN, C, _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorLinearizedNHWC> { using type = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>; }; // Conv: Maps to modes ((P,Q,Z,N), C, _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorNDHWC> { using type = cute::Stride<cute::Stride<int64_t, int64_t, int64_t, int64_t>, cute::Int<1>, cute::Int<0>>; }; // Conv: Maps to modes (PQZN, C, _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorLinearizedNDHWC> { using type = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>; }; // Conv: Maps to modes (K, (C,S), _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorKCS> { using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t>, cute::Int<0>>; }; // Conv: Maps to modes (K, (C,S,R), _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorKCSR> { using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t>, cute::Int<0>>; }; // Conv: Maps to modes (K, (C,S,R,T), _0) for compatiblity with GEMM epilogues expecting a batch mode stride template <> struct TagToStrideC<cutlass::layout::TensorKCSRT> { using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>, cute::Int<0>>; }; // Convenience aliases template<class LayoutTag> using TagToStrideA_t = typename TagToStrideA<LayoutTag>::type; template<class LayoutTag> using TagToStrideB_t = typename TagToStrideB<LayoutTag>::type; template<class LayoutTag> using TagToStrideC_t = typename TagToStrideC<LayoutTag>::type; //////////////////////////////////////////////////////////////////////////////////////////////////// // For 2.x compatibility APIs, provide stride->layout tag mappers template<int ModeIndex, class 
Stride> constexpr bool is_major(Stride = {}) { // Account for stride types with and without batch mode and batch modes with static zero stride return cute::is_constant<1, decltype(cute::front(cute::get<ModeIndex>(cute::remove_pointer_t<Stride>{})))>::value; } // Note : This method can be used for deducing the Layout Tag of A, C, D Matrices template<class StrideA> constexpr auto stride_to_layout_tag_A() { if constexpr (is_major<0, StrideA>()) { // M major return layout::ColumnMajor{}; } else { // K major return layout::RowMajor{}; } CUTE_GCC_UNREACHABLE; } template<class StrideB> constexpr auto stride_to_layout_tag_B() { if constexpr (is_major<0, StrideB>()) { // N major return layout::RowMajor{}; } else { // K major return layout::ColumnMajor{}; } CUTE_GCC_UNREACHABLE; } template<class StrideC> constexpr auto stride_to_layout_tag_C() { if constexpr (is_major<0, StrideC>()) { // M major return layout::ColumnMajor{}; } else { // N major return layout::RowMajor{}; } CUTE_GCC_UNREACHABLE; } // Utilities to map Stride back on to their corresponding layout tags template <class S> struct StrideToLayoutTagA { using type = decltype(detail::stride_to_layout_tag_A<S>()); }; template <class S> struct StrideToLayoutTagB { using type = decltype(detail::stride_to_layout_tag_B<S>()); }; template <class S> struct StrideToLayoutTagC { using type = decltype(detail::stride_to_layout_tag_C<S>()); }; // Convenience aliases template<class S> using StrideToLayoutTagA_t = typename StrideToLayoutTagA<S>::type; template<class S> using StrideToLayoutTagB_t = typename StrideToLayoutTagB<S>::type; template<class S> using StrideToLayoutTagC_t = typename StrideToLayoutTagC<S>::type; //////////////////////////////////////////////////////////////////////////////////////////////////// // Inspects a tiled copy and whether its copy engine is TMA or not template<class GmemTiledCopy> constexpr bool is_tma_copy_engine() { if constexpr (cute::is_void_v<GmemTiledCopy>) { return false; } else { if constexpr ( cute::is_base_of_v<cute::SM90_TMA_LOAD, GmemTiledCopy> || cute::is_base_of_v<cute::SM90_TMA_LOAD_MULTICAST, GmemTiledCopy> || cute::is_base_of_v<cute::SM90_TMA_LOAD_IM2COL, GmemTiledCopy> || cute::is_base_of_v<cute::SM90_TMA_LOAD_IM2COL_MULTICAST, GmemTiledCopy> || cute::is_base_of_v<cute::SM90_TMA_STORE, GmemTiledCopy> || cute::is_base_of_v<cute::SM90_TMA_STORE_IM2COL, GmemTiledCopy> ) { return true; } } return false; } template <class X, class = void> struct RawDtype { using type = X; }; template <class X> struct RawDtype<X,cute::void_t<typename X::raw_type>> { using type = typename X::raw_type; }; // Inspects a TiledCopy and returns its alignment in terms of element count template <class GmemTiledCopy, class Element, class ElementMma = Element> constexpr int get_alignment_count_from_gmem_tiled_copy() { if constexpr (cute::is_void_v<GmemTiledCopy>) { return 1; } // Account for ElementC = void kernels else if constexpr (cute::is_void_v<Element>) { return 0; } else { // For TMA tiled copies, we know the alignment has to be 128 bits if constexpr (is_tma_copy_engine<GmemTiledCopy>()) { return 128 / sizeof_bits<Element>::value; } else { // For non-TMA tiled copies, TiledCopy holds the alignment count directly in its TiledShape_MN return GmemTiledCopy::NumValSrc; } } } // Return the shape that is associated with stride-1 mode, or 1 if not found template<typename Shape, typename Stride> CUTLASS_HOST_DEVICE constexpr auto get_contiguous_shape(Shape const & shape, Stride const & stride) { using namespace cute; auto idx = 
find_if(append(flatten(stride), _1{}), [](auto s){ return is_constant<1,decltype(s)>{}; }); return get<decltype(idx)::value>(append(flatten(shape), _1{})); } // Check if tensor shape satisfies a given major alignment template<int Alignment, class Shape, class Stride> CUTLASS_HOST_DEVICE constexpr bool check_alignment(Shape const & shape, Stride const & stride) { return is_major<0>(stride) ? get_contiguous_shape(cute::get<0>(shape), cute::get<0>(stride)) % Alignment == 0 : get_contiguous_shape(cute::get<1>(shape), cute::get<1>(stride)) % Alignment == 0; } // Check if tensor shape satisfies a given major alignment template<int B, int M, int S> CUTLASS_HOST_DEVICE constexpr size_t alignment_for_swizzle(cute::Swizzle<B, M, S>) { static_assert(B >= 0 and M >= 0); return size_t(1) << size_t(B + M + cute::abs(S)); } template<class Layout> CUTLASS_HOST_DEVICE constexpr size_t alignment_for_swizzle(Layout layout) { return alignment_for_swizzle(cute::detail::get_swizzle_portion(layout)); } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::detail
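The tag-to-stride and stride-to-tag traits above are designed to round-trip: a 2.x layout tag maps to a 3.x cute stride type, and that stride type maps back to the original tag. A minimal compile-time check, assuming it is placed in a user translation unit with the CUTLASS include directory on the path:

#include <cstdint>
#include <type_traits>

#include "cutlass/detail/layout.hpp"

namespace layout_roundtrip_example {

// For operand A, RowMajor means K is the contiguous mode: the stride is (ldA, _1, batch_stride).
using StrideA = cutlass::detail::TagToStrideA_t<cutlass::layout::RowMajor>;
static_assert(std::is_same_v<StrideA, cute::Stride<int64_t, cute::Int<1>, int64_t>>,
              "RowMajor A maps to a K-major stride");

// Mapping the stride back recovers the original layout tag.
static_assert(std::is_same_v<cutlass::detail::StrideToLayoutTagA_t<StrideA>,
                             cutlass::layout::RowMajor>,
              "stride -> tag round-trips");

} // namespace layout_roundtrip_example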
cutlass/include/cutlass/detail/layout.hpp
{ "file_path": "cutlass/include/cutlass/detail/layout.hpp", "repo_id": "cutlass", "token_count": 4353 }
29
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Fusion callbacks specializations for the sm90 TMA warp-specialized (ws) epilogue */ #pragma once #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cutlass/epilogue/dispatch_policy.hpp" #include "cutlass/epilogue/fusion/callbacks.hpp" #include "cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp" #include "cutlass/epilogue/fusion/sm90_visitor_load_tma_warpspecialized.hpp" #include "cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp" #include "cutlass/epilogue/fusion/sm90_visitor_compute_tma_warpspecialized.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::fusion { ///////////////////////////////////////////////////////////////////////////////////////////////// template <class NodeOp, class... 
ChildOps> using Sm90EVT = Sm90TreeVisitor<NodeOp, ChildOps...>; // D = alpha * acc template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class ElementOutput, class ElementCompute, class ElementScalar, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::ScaledAcc<ElementOutput, ElementCompute, ElementScalar, RoundStyle>, CtaTileShapeMNK, EpilogueTile > : Sm90EVT<Sm90Compute<multiplies, ElementOutput, ElementCompute, RoundStyle>, Sm90ScalarBroadcast<ElementScalar>, Sm90AccFetch > { using Impl = Sm90EVT<Sm90Compute<multiplies, ElementOutput, ElementCompute, RoundStyle>, Sm90ScalarBroadcast<ElementScalar>, Sm90AccFetch >; using Operation = fusion::ScaledAcc<ElementOutput, ElementCompute, ElementScalar, RoundStyle>; struct Arguments { // Give a name and flat ordering to the fusion callback args ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; // Conversion to the args expected by the visitor implementation // to_underlying_arguments will implicitly call this operator typename Impl::Arguments() const { return { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }; // end binary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = alpha * acc + beta * C template< class ElementOutput, class ElementCompute, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinearCombination = Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc) Sm90ScalarBroadcast<ElementScalar>, // beta Sm90SrcFetch<ElementSource>, // C Sm90EVT<Sm90Compute<multiplies, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc Sm90ScalarBroadcast<ElementScalar>, // alpha Sm90AccFetch // acc > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class ElementOutput, class ElementCompute, class ElementSource, class ElementScalar, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinearCombination<ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>, CtaTileShapeMNK, EpilogueTile > : Sm90LinearCombination<typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle> { using Impl = Sm90LinearCombination<typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle>; using Operation = fusion::LinearCombination<ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; operator typename Impl::Arguments() const { return { // ternary op : beta * C + (alpha * acc) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // 
leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }, // end binary op {} // ternary args : multiply_add }; // end ternary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = activation(alpha * acc + beta * C) template< template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombEltAct = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, // activation(beta * C + (alpha * acc)) Sm90LinearCombination<ElementCompute, ElementCompute, ElementSource, ElementScalar, RoundStyle> // beta * C + (alpha * acc) >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementSource, class ElementScalar, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>, CtaTileShapeMNK, EpilogueTile > : Sm90LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle> { using Impl = Sm90LinCombEltAct<ActivationFn, typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle>; using Operation = fusion::LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); operator typename Impl::Arguments() const { return { // unary op: activation(beta * C + (alpha * acc)) { // ternary op : beta * C + (alpha * acc) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }, // end binary op {} // ternary args : multiply_add }, // end ternary op activation // unary args: activation }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = alpha * acc + beta * C + per-row bias template< class CtaTileShapeMNK, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombPerRowBias = Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias) Sm90ScalarBroadcast<ElementScalar>, // beta Sm90SrcFetch<ElementSource>, // C Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias Sm90ScalarBroadcast<ElementScalar>, // alpha Sm90AccFetch, // acc Sm90ColBroadcast<0, 
CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class ElementOutput, class ElementCompute, class ElementBias, class ElementSource, class ElementScalar, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombPerRowBias<ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>, CtaTileShapeMNK, EpilogueTile > : Sm90LinCombPerRowBias< CtaTileShapeMNK, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> { using Impl = Sm90LinCombPerRowBias< CtaTileShapeMNK, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>; using Operation = fusion::LinCombPerRowBias< ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; operator typename Impl::Arguments() const { return { // ternary op : beta * C + (alpha * acc + bias) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // ternary op : alpha * acc + bias {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }; // end ternary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = activation(alpha * acc + beta * C + per-row bias) template< class CtaTileShapeMNK, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombPerRowBiasEltAct = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, Sm90LinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias, class ElementSource, class ElementScalar, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile > : Sm90LinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle > { using Impl = Sm90LinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, 
RoundStyle >; using Operation = fusion::LinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); operator typename Impl::Arguments() const { return { // unary op : activation(beta * C + (alpha * acc + bias)) { // ternary op : beta * C + (alpha * acc + bias) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // ternary op : alpha * acc + bias {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op activation // unary args : activation }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = activation(alpha * acc + beta * C + per-row bias) // Aux = alpha * acc + beta * C + per-row bias) template< class CtaTileShapeMNK, class EpilogueTile, int Stages, class StrideAux, class SmemLayoutAtom, class CopyOpR2S, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombPerRowBiasEltActAux = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, Sm90EVT<Sm90AuxStore<Stages, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>, Sm90LinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class GmemLayoutTagAux, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux, class ElementBias, class ElementSource, class ElementScalar, int AlignmentAux, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile, class SmemLayoutAtom, class CopyOpR2S > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombPerRowBiasEltActAux< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile, SmemLayoutAtom, CopyOpR2S > : Sm90LinCombPerRowBiasEltActAux< CtaTileShapeMNK, EpilogueTile, StagesD, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle > { using Impl = Sm90LinCombPerRowBiasEltActAux< CtaTileShapeMNK, EpilogueTile, StagesD, 
cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; using Operation = fusion::LinCombPerRowBiasEltActAux< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>; ElementAux* aux_ptr = nullptr; StrideAux dAux = {}; operator typename Impl::Arguments() const { return { // unary op : activation(store(beta * C + (alpha * acc + bias))) { // unary op : store(beta * C + (alpha * acc + bias)) { // ternary op : beta * C + (alpha * acc + bias) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // ternary op : alpha * acc + bias {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op {aux_ptr, dAux} // unary args : store }, // end unary op activation // unary args : activation }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = per-row alpha * acc + per-row beta * C + per-row bias template< class CtaTileShapeMNK, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, int AlignmentScalar = 128 / sizeof_bits_v<ElementScalar>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90PerRowLinCombPerRowBias = Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias) Sm90ColBroadcast<0, CtaTileShapeMNK, ElementScalar, Stride<_1,_0,int>, AlignmentScalar>, // beta Sm90SrcFetch<ElementSource>, // C Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias Sm90ColBroadcast<0, CtaTileShapeMNK, ElementScalar, Stride<_1,_0,int>, AlignmentScalar>, // alpha Sm90AccFetch, // acc Sm90ColBroadcast<0, CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias > >; // D = activation(per-row alpha * acc + per-row beta * C + per-row bias) template< class CtaTileShapeMNK, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, int AlignmentScalar = 128 / sizeof_bits_v<ElementScalar>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90PerRowLinCombPerRowBiasEltAct = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, Sm90PerRowLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, 
ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle> >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias, class ElementSource, class ElementScalar, int AlignmentBias, int AlignmentScalar, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::PerRowLinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle >, CtaTileShapeMNK, EpilogueTile > : Sm90PerRowLinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle > { using Impl = Sm90PerRowLinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle >; using Operation = fusion::PerRowLinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle >; struct Arguments { using StrideAlpha = Stride<_1,_0,int>; using StrideBeta = Stride<_1,_0,int>; ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; StrideAlpha dAlpha = {}; StrideBeta dBeta = {}; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); operator typename Impl::Arguments() const { return { // unary op : activation(beta * C + (alpha * acc + bias)) { // ternary op : beta * C + (alpha * acc + bias) {beta_ptr, beta, dBeta}, // leaf args : beta {}, // leaf args : C { // ternary op : alpha * acc + bias {alpha_ptr, alpha, dAlpha}, // leaf args : alpha {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op activation // unary args : activation }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename T> constexpr bool is_fp8_v = cute::is_same_v<T,float_e4m3_t> || cute::is_same_v<T,float_e5m2_t>; // We only apply the scaling factor if output is fp8 template <typename ElementOutput> struct ScaleOutOp { template <typename T> using Op = cutlass::first<T>; }; template <> struct ScaleOutOp<float_e4m3_t> { template <typename T> using Op = cutlass::multiplies<T>; }; template <> struct ScaleOutOp<float_e5m2_t> { template <typename T> using Op = cutlass::multiplies<T>; }; template <typename T> using amax = cutlass::maximum_absolute_value_reduction<T, true>; // propogate nans }; // end namespace detail // D = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias template< class CtaTileShapeMNK, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / 
sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBias = Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias) Sm90ScalarBroadcast<ElementScalar, Stride<_0,_0,_0>, 2>, // scale_c * beta Sm90SrcFetch<ElementSource>, // C Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias Sm90ScalarBroadcast<ElementScalar, Stride<_0,_0,_0>, 3>, // scale_a * scale_b * alpha Sm90AccFetch, // acc Sm90ColBroadcast<0, CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias > >; // Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias // if D is fp8 // D = scale_d * activation(Z) // else // D = activation(Z) template< class CtaTileShapeMNK, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBiasEltAct = Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z) // Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> >, Sm90ScalarBroadcast<ElementScalar> // scale_d >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias, class ElementSource, class ElementScalar, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::ScaledLinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile > : Sm90ScaledLinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle > { using Impl = Sm90ScaledLinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >; using Operation = fusion::ScaledLinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; ElementScalar scale_a = ElementScalar(1); ElementScalar scale_b = ElementScalar(1); ElementScalar scale_c = ElementScalar(1); ElementScalar scale_d = ElementScalar(1); ElementScalar const* scale_a_ptr = nullptr; ElementScalar const* scale_b_ptr = nullptr; ElementScalar const* scale_c_ptr = nullptr; ElementScalar const* scale_d_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename 
Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); operator typename Impl::Arguments() const { return { // binary op : activation((scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias)) * scale_d { // unary op : activation((scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias)) { // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias) {{scale_c, beta}, {scale_c_ptr, beta_ptr} }, // leaf args : (scale_c * beta) {}, // leaf args : C { // ternary op : (scale_a * scale_b * alpha) * acc + bias {{scale_a, scale_b, alpha}, {scale_a_ptr, scale_b_ptr, alpha_ptr} }, // leaf args : (scale_a * scale_b * alpha) {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op activation // unary args : activation }, // end unary op {{scale_d}, {scale_d_ptr} }, // leaf args : scale_d {} // binary args : multiplies or first }; // end binary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias // if D is fp8 // amax_d = max(abs(elements in activation(Z))) // D = scale_d * activation(Z) // else // D = activation(Z) // if Aux is fp8 // amax_aux = max(abs(elements in Z)) // Aux = scale_aux * Z // else // Aux = Z // fp8 aux specialization template< class CtaTileShapeMNK, class EpilogueTile, int StagesD, class StrideAux, class SmemLayoutAtom, class CopyOpR2S, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementAmax = ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBiasEltActAmaxAuxFp8 = Sm90SplitTreeVisitor< // Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>, // D = activation(Z) * scale_d, amax_d = max(abs(elements in D)) Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_d Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z) Sm90SplitTreeFetch // Z > >, Sm90ScalarBroadcast<ElementScalar> // scale_d >, // Aux = Z * scale_aux, amax_aux = max(abs(elements in Aux)) Sm90EVT<Sm90AuxStore<StagesD, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>, // store(Aux) Sm90EVT<Sm90Compute<cutlass::multiplies, ElementCompute, ElementCompute, RoundStyle>, // Z * scale_aux Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_aux Sm90SplitTreeFetch // Z >, Sm90ScalarBroadcast<ElementScalar> // scale_aux > > >; // non-fp8 aux specialization // lets us use some EVT specializations such as relu + uint1b_t aux template< class CtaTileShapeMNK, class EpilogueTile, int 
StagesD, class StrideAux, class SmemLayoutAtom, class CopyOpR2S, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementAmax = ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBiasEltActAmaxAuxNotFp8 = // D = activation(Z) * scale_d, amax_d = max(abs(elements in D)) Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_d Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z) Sm90EVT<Sm90AuxStore<StagesD, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>, // Aux = Z // Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>, > > >, Sm90ScalarBroadcast<ElementScalar> // scale_d >; // dispatcher template< class CtaTileShapeMNK, class EpilogueTile, int StagesD, class StrideAux, class SmemLayoutAtom, class CopyOpR2S, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementAmax = ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBiasEltActAmaxAux = conditional_t<detail::is_fp8_v<ElementAux>, Sm90ScaledLinCombPerRowBiasEltActAmaxAuxFp8< CtaTileShapeMNK, EpilogueTile, StagesD, StrideAux, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar,AlignmentAux, AlignmentBias, RoundStyle >, Sm90ScaledLinCombPerRowBiasEltActAmaxAuxNotFp8< CtaTileShapeMNK, EpilogueTile, StagesD, StrideAux, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class GmemLayoutTagAux, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux, class ElementAmax, class ElementBias, class ElementSource, class ElementScalar, int AlignmentAux, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile, class SmemLayoutAtom, class CopyOpR2S > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::ScaledLinCombPerRowBiasEltActAmaxAux< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile, SmemLayoutAtom, CopyOpR2S > : Sm90ScaledLinCombPerRowBiasEltActAmaxAux< CtaTileShapeMNK, EpilogueTile, StagesD, 
cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle > { using Impl = Sm90ScaledLinCombPerRowBiasEltActAmaxAux< CtaTileShapeMNK, EpilogueTile, StagesD, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; using Operation = fusion::ScaledLinCombPerRowBiasEltActAmaxAux< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; ElementScalar scale_a = ElementScalar(1); ElementScalar scale_b = ElementScalar(1); ElementScalar scale_c = ElementScalar(1); ElementScalar scale_d = ElementScalar(1); ElementScalar const* scale_a_ptr = nullptr; ElementScalar const* scale_b_ptr = nullptr; ElementScalar const* scale_c_ptr = nullptr; ElementScalar const* scale_d_ptr = nullptr; ElementScalar scale_aux = ElementScalar(1); ElementScalar const* scale_aux_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); ElementAmax* amax_D_ptr = nullptr; ElementAmax* amax_aux_ptr = nullptr; using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>; ElementAux* aux_ptr = nullptr; StrideAux dAux = {}; operator typename Impl::Arguments() const { // Only compute amax_d if D is fp8 ElementAmax* amax_D_ptr_ = nullptr; if constexpr (detail::is_fp8_v<ElementOutput>) { amax_D_ptr_ = amax_D_ptr; } // Aux is fp8 -> DAG arguments if constexpr (detail::is_fp8_v<ElementAux>) { typename Impl::Arguments args; // always use structured binding to unpack DAG args since it may or may not be a tuple auto& [Z_args, aux_args, D_args] = args; Z_args = { // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias) {{scale_c, beta}, {scale_c_ptr, beta_ptr} }, // leaf args : (scale_c * beta) {}, // leaf args : C { // ternary op : (scale_a * scale_b * alpha) * acc + bias {{scale_a, scale_b, alpha}, {scale_a_ptr, scale_b_ptr, alpha_ptr} }, // leaf args : (scale_a * scale_b * alpha) {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }; // end ternary op D_args = { // binary op : activation(Z) * scale_d or activation(Z) { // unary op : reduce(activation(Z)) { // unary op : activation(Z) {}, // leaf args : Z activation // unary args : activation }, // end unary op {amax_D_ptr_} // unary args : reduce }, // end unary op {{scale_d}, {scale_d_ptr} }, // leaf args : scale_d {} // binary args : multiplies or first }; // end binary op aux_args = { // unary op : store(Aux) { // binary op : Z * scale_d or Z { // unary op : reduce(Z) {}, // leaf args : Z {amax_aux_ptr} // unary args : reduce }, // end unary op {{scale_aux}, {scale_aux_ptr} }, // leaf args : scale_d {} // binary args : multiplies }, // end binary op {aux_ptr, dAux} // unary args : 
store }; // end unary op return args; } // Aux is not fp8 -> Tree arguments else { return { // binary op : activation(Z) * scale_d or activation(Z) { // unary op : reduce(activation(Z)) { // unary op : activation(Z) { // unary op : store(Z) { // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias) {{scale_c, beta}, {scale_c_ptr, beta_ptr} }, // leaf args : (scale_c * beta) {}, // leaf args : C { // ternary op : (scale_a * scale_b * alpha) * acc + bias {{scale_a, scale_b, alpha}, {scale_a_ptr, scale_b_ptr, alpha_ptr} }, // leaf args : (scale_a * scale_b * alpha) {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias }, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op {aux_ptr, dAux} // unary args : store }, // end unary op activation // unary args : activation }, // end unary op {amax_D_ptr_} // unary args : reduce }, // end unary op {{scale_d},{scale_d_ptr}}, // leaf args : scale_d {} // binary args : multiplies or first }; // end binary op } } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template< class CtaTileShapeMNK, class EpilogueTile, int Stages, class StrideAux, class SmemLayoutAtom, class CopyOpS2R, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombDeEltAct = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, // activation(beta * C + (alpha * acc), aux) Sm90LinearCombination<ElementCompute, ElementCompute, ElementSource, ElementScalar, RoundStyle>, // beta * C + (alpha * acc) Sm90AuxLoad<Stages, EpilogueTile, ElementAux, StrideAux, SmemLayoutAtom, CopyOpS2R, AlignmentAux> // aux >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class GmemLayoutTagAux, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux, class ElementSource, class ElementScalar, int AlignmentAux, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile, class SmemLayoutAtom, class CopyOpS2R > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombDeEltAct< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle >, CtaTileShapeMNK, EpilogueTile, SmemLayoutAtom, CopyOpS2R > : Sm90LinCombDeEltAct< CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle > { using Impl = Sm90LinCombDeEltAct< CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle >; using Operation = fusion::LinCombDeEltAct< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* 
alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>; ElementAux const* aux_ptr = nullptr; StrideAux dAux = {}; operator typename Impl::Arguments() const { return { // binary op : activation(beta * C + (alpha * acc), aux) { // ternary op : beta * C + (alpha * acc) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }, // end binary op {} // ternary args : multiply_add }, // end ternary op {aux_ptr, ElementAux(0), dAux}, // leaf args : aux activation // binary args : activation }; // end binary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template< class CtaTileShapeMNK, class EpilogueTile, int Stages, class StrideAux, class SmemLayoutAtom, class CopyOpS2R, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombDeEltActDePerRowBias = Sm90EVT<Sm90Compute<cutlass::epilogue::thread::Identity, ElementOutput, ElementCompute, RoundStyle>, // Identity for final conversion Sm90EVT<Sm90ColReduction<plus, plus, plus, 0, CtaTileShapeMNK, ElementBias, ElementCompute, RoundStyle, Stride<_1,_0,int>, AlignmentBias>, Sm90LinCombDeEltAct<CtaTileShapeMNK, EpilogueTile, Stages, StrideAux, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementCompute, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle> > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class GmemLayoutTagAux, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux, class ElementBias, class ElementSource, class ElementScalar, int AlignmentAux, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile, class SmemLayoutAtom, class CopyOpS2R > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombDeEltActDePerRowBias< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile, SmemLayoutAtom, CopyOpS2R > : Sm90LinCombDeEltActDePerRowBias< CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle > { using Impl = Sm90LinCombDeEltActDePerRowBias< CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; using Operation = fusion::LinCombDeEltActDePerRowBias< GmemLayoutTagAux, 
ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>; ElementAux const* aux_ptr = nullptr; StrideAux dAux = {}; using StrideBias = Stride<_1,_0,int>; ElementBias* dbias_ptr = nullptr; StrideBias dDbias = {}; operator typename Impl::Arguments() const { return { // unary op : identity/convert { // unary op : reduce(activation(beta * C + (alpha * acc), aux)) { // binary op : activation(beta * C + (alpha * acc), aux) { // ternary op : beta * C + (alpha * acc) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }, // end binary op {} // ternary args : multiply_add }, // end ternary op {aux_ptr, ElementAux(0), dAux}, // leaf args : aux activation // binary args : activation }, // end binary op {dbias_ptr, ElementCompute(0), dDbias} // unary args : reduce }, // end unary op {} // unary args : identity/convert }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <class FusionOpOrCallbacks, class = cute::void_t<>> struct get_element_aux { using type = void; }; template <class FusionOpOrCallbacks> struct get_element_aux<FusionOpOrCallbacks, cute::void_t<typename FusionOpOrCallbacks::ElementAux>> { using type = typename FusionOpOrCallbacks::ElementAux; }; template <class NodeOp, class... ChildOps> struct get_element_aux<Sm90TreeVisitor<NodeOp, ChildOps...>, cute::void_t<>> { using type = typename get_element_aux<NodeOp>::type; }; template <class... Ts> struct get_element_aux<FusionCallbacks<Ts...>, cute::void_t<typename FusionCallbacks<Ts...>::Operation>> { private: using Operation = typename FusionCallbacks<Ts...>::Operation; public: using type = typename get_element_aux<Operation>::type; }; } // namespace cutlass:epilogue::fusion::detail template <class Callbacks> using get_element_aux_t = typename detail::get_element_aux<Callbacks>::type; } // namespace cutlass::epilogue::fusion ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp/0
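The nested initializer trees annotated above are easier to follow with a scalar reference model of what they evaluate per output element. The sketch below is standalone host code, not the EVT implementation: a plain multiply stands in for the binary ActivationFn purely to make the numbers concrete, the data values are arbitrary, and the per-row dbias reduction reflects the Stride<_1,_0,int> bias layout as read from the LinCombDeEltActDePerRowBias definition above.

#include <cstdio>
#include <vector>

int main() {
  // Problem sizes and scalars mirroring the flat Arguments struct
  int M = 2, N = 3;
  float alpha = 1.0f, beta = 0.5f;

  std::vector<float> acc = {1, 2, 3, 4, 5, 6};   // M x N accumulators from the MMA
  std::vector<float> C   = {1, 1, 1, 1, 1, 1};   // M x N source operand
  std::vector<float> aux = {2, 2, 2, 2, 2, 2};   // M x N auxiliary operand (Sm90AuxLoad)
  std::vector<float> D(M * N);
  std::vector<float> dbias(M, 0.0f);             // per-row reduction output (Sm90ColReduction)

  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      // ternary node: beta * C + (alpha * acc)
      float lin = beta * C[m * N + n] + alpha * acc[m * N + n];
      // binary node: activation(lin, aux); a multiply stands in for ActivationFn here
      float t = lin * aux[m * N + n];
      D[m * N + n] = t;        // final identity/convert node
      dbias[m] += t;           // reduction over the N dimension -> per-row dbias
    }
  }

  std::printf("D[0][0] = %g, dbias = {%g, %g}\n", D[0], dbias[0], dbias[1]);
  return 0;
}

In a real kernel the same scalars arrive through the flat Arguments members shown above (alpha, beta, aux_ptr, dAux, dbias_ptr), and operator Impl::Arguments() packs them into the annotated tree for the visitor to evaluate on register fragments.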
{ "file_path": "cutlass/include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp", "repo_id": "cutlass", "token_count": 21890 }
30
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations with a generic element-wise activation function. Scaling factors are applied to operands A, B, and C. The pre-activation auxiliary output is also returned. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/scale_type.h" #include "cutlass/epilogue/thread/linear_combination_generic.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// Aux = ((alpha * scale_a * scale_b) * accumulator) + ((beta * scale_c) * source) + bias /// D = activation(Aux) /// template < template<typename T> class ActivationFunctor, typename ElementOutput_, ///< Data type used to load and store tensors typename ElementAuxOutput_, ///< Data type used to store auxiliary output int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest, bool IsHeavy = false > class LinearCombinationGenericWithScalingAndAbsMax { public: using ElementOutput = ElementOutput_; using ElementAuxOutput = ElementAuxOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; using ElementScalingFactor = ElementAccumulator_; /// Data type used for absolute maximum value using ElementAbsmax = float; static bool const kIsScalingAndAmaxAuxOutputNeeded = (platform::is_same<ElementAuxOutput, cutlass::float_e4m3_t>::value || platform::is_same<ElementAuxOutput, cutlass::float_e5m2_t>::value); static bool const kIsScalingAndAmaxOutputNeeded = (platform::is_same<ElementOutput, cutlass::float_e4m3_t>::value || platform::is_same<ElementOutput, cutlass::float_e5m2_t>::value); static bool const kIsHeavy = IsHeavy; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAuxOutput = Array<ElementAuxOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { struct ActivationParams : LinearCombinationGenericParams<ElementCompute>, GenericActivationTraits<ActivationFunctor<ElementCompute>>::Arguments { using LinearCombinationGenericParams<ElementCompute>::LinearCombinationGenericParams; }; ActivationParams activation; ElementScalingFactor const* scale_a_ptr = nullptr; ///< pointer to a scalar - if not null, loads it from memory ElementScalingFactor const* scale_b_ptr = nullptr; ///< pointer to b scalar - if not null, loads it from memory ElementScalingFactor const* scale_c_ptr = nullptr; ///< pointer to c scalar - if not null, loads it from memory ElementScalingFactor const* scale_d_ptr = nullptr; ///< pointer to d scalar - if not null, loads it from memory ElementScalingFactor const* scale_aux_ptr = nullptr; ///< pointer to aux scalar - if not null, loads it from memory ElementAbsmax * abs_max_aux_ptr = nullptr; ///< pointer to location to store amax of Aux ElementAbsmax * abs_max_D_ptr = nullptr; ///< pointer to location to store amax of D CUTLASS_HOST_DEVICE Params() : scale_a_ptr(nullptr), scale_b_ptr(nullptr), scale_c_ptr(nullptr), scale_d_ptr(nullptr), scale_aux_ptr(nullptr), abs_max_aux_ptr(nullptr), abs_max_D_ptr(nullptr) {} CUTLASS_HOST_DEVICE Params(ActivationParams activation_params, ElementScalingFactor const* scale_a_ptr, ElementScalingFactor const* scale_b_ptr, ElementScalingFactor const* scale_c_ptr, ElementScalingFactor const* scale_d_ptr, ElementScalingFactor const* scale_aux_ptr, ElementAbsmax * abs_max_aux_ptr, ElementAbsmax * abs_max_D_ptr) : 
activation(activation_params), scale_a_ptr(scale_a_ptr), scale_b_ptr(scale_b_ptr), scale_c_ptr(scale_c_ptr), scale_d_ptr(scale_d_ptr), scale_aux_ptr(scale_aux_ptr), abs_max_aux_ptr(abs_max_aux_ptr), abs_max_D_ptr(abs_max_D_ptr) {} }; private: // // Data members // Params params_; bool skip_elementwise_; // Scaling factors for output and auxiliary output ElementCompute scale_d_; ElementCompute scale_aux_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationGenericWithScalingAndAbsMax(Params const &params) : params_(params), skip_elementwise_(false), scale_d_(ElementCompute(params.scale_d_ptr ? *(params.scale_d_ptr) : ElementScalingFactor(1))), scale_aux_(ElementCompute(params.scale_aux_ptr ? *(params.scale_aux_ptr) : ElementScalingFactor(1))) { params_.activation.alpha = (params.activation.alpha_ptr ? *params.activation.alpha_ptr : params.activation.alpha); params_.activation.beta = (params.activation.beta_ptr ? *params.activation.beta_ptr : params.activation.beta); auto scale_a = ElementCompute(params.scale_a_ptr ? *(params.scale_a_ptr) : ElementScalingFactor(1)); auto scale_b = ElementCompute(params.scale_b_ptr ? *(params.scale_b_ptr) : ElementScalingFactor(1)); auto scale_c = ElementCompute(params.scale_c_ptr ? *(params.scale_c_ptr) : ElementScalingFactor(1)); multiplies<ElementCompute> multiply; params_.activation.alpha = multiply(params.activation.alpha, multiply(scale_a, scale_b)); params_.activation.beta = multiply(params.activation.beta, scale_c); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return params_.activation.beta != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { params_.activation.beta = ElementCompute(1); } // Only the final partition should perform the activation function // and scale the output and auxiliary output values. 
if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; scale_d_ = ElementCompute(1.); scale_aux_ = ElementCompute(1.); } } /// Computes linear scaling: /// Aux = (alpha * scale_a * scale_b * accumulator) + (beta * scale_c * source) + bias /// D = activation(Aux) CUTLASS_HOST_DEVICE void operator()( FragmentCompute& output, FragmentCompute& aux_output, FragmentAccumulator const &accumulator, FragmentCompute const& bias, FragmentOutput const &source) { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> multiply; plus<FragmentCompute> add; multiply_add<FragmentCompute> mul_add_accumulator; ActivationFunctor<FragmentCompute> activation; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(params_.activation.alpha, converted_accumulator, intermediate); } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = multiply(params_.activation.beta, converted_source); intermediate = mul_add_accumulator(params_.activation.alpha, converted_accumulator, intermediate); } intermediate = add(intermediate, bias); aux_output = intermediate; if constexpr (GenericActivationTraits<ActivationFunctor<ElementCompute>>::IsArgumentsNeeded) { output = skip_elementwise_ ? intermediate : activation(intermediate, params_.activation); } else { output = skip_elementwise_ ? intermediate : activation(intermediate); } } /// Computes linear scaling: /// Aux = (alpha * scale_a * scale_b * accumulator) + bias /// D = activation(Aux) CUTLASS_DEVICE void operator()( FragmentCompute& output, FragmentCompute& aux_output, FragmentAccumulator const &accumulator, FragmentCompute const& bias) { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> multiply; plus<FragmentCompute> add; ActivationFunctor<FragmentCompute> activation; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = multiply(params_.activation.alpha, converted_accumulator); } intermediate = add(intermediate, bias); aux_output = intermediate; if constexpr (GenericActivationTraits<ActivationFunctor<FragmentCompute>>::IsArgumentsNeeded) { output = skip_elementwise_ ? intermediate : activation(intermediate, params_.activation); } else { output = skip_elementwise_ ? intermediate : activation(intermediate); } } CUTLASS_HOST_DEVICE ElementAbsmax* get_ptr_output_abs_max() const { return params_.abs_max_D_ptr; } CUTLASS_HOST_DEVICE ElementAbsmax* get_ptr_aux_output_abs_max() const { return params_.abs_max_aux_ptr; } CUTLASS_HOST_DEVICE ElementCompute get_scale_d() const { return scale_d_; } CUTLASS_HOST_DEVICE ElementCompute get_scale_aux() const { return scale_aux_; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
cutlass/include/cutlass/epilogue/thread/linear_combination_generic_with_scaling.h/0
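A scalar model of the math this functor participates in makes the constructor's scale folding concrete. The snippet below is a standalone host sketch with made-up values: ReLU stands in for the templated ActivationFunctor, and the application of scale_d together with the abs-max update is shown inline for illustration; in the real epilogue those two steps are driven outside this functor through get_scale_d() and get_ptr_output_abs_max().

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  // Host-side scalars mirroring Params: alpha/beta plus per-tensor scale factors
  float alpha = 1.0f, beta = 1.0f;
  float scale_a = 0.5f, scale_b = 2.0f, scale_c = 0.25f, scale_d = 4.0f;

  // One accumulator element, one source element, one bias element
  float acc = 3.0f, src = 8.0f, bias = 1.0f;

  // The constructor folds the scale factors into alpha and beta once:
  float folded_alpha = alpha * scale_a * scale_b;   // == 1.0
  float folded_beta  = beta * scale_c;              // == 0.25

  // Per-element math of operator():
  //   Aux = folded_alpha * acc + folded_beta * src + bias
  //   D   = activation(Aux)
  float aux_out = folded_alpha * acc + folded_beta * src + bias;   // 3 + 2 + 1 = 6
  float d       = std::max(aux_out, 0.0f);                         // ReLU as the example activation

  // Output scaling and abs-max tracking (applied outside this functor)
  float abs_max_D = std::fabs(d);
  float d_scaled  = scale_d * d;

  std::printf("Aux = %g, D = %g, amax(D) = %g, scale_d * D = %g\n",
              aux_out, d, abs_max_D, d_scaled);
  return 0;
}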
{ "file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_generic_with_scaling.h", "repo_id": "cutlass", "token_count": 4504 }
31
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #if !defined(__CUDACC_RTC__) #include <type_traits> #include <utility> #endif #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// // // This is used for metaprogramming epilogue functors. If they define // `static bool const kIsHeavy = true;`, then the epilogue functor itself is // not inlined. This results in smaller code and is advantageous if the epilogue // functor consists of many instructions. // // If the epilogue functor does not define `kIsHeavy` or if it is `false`, then // the behavior from CUTLASS 2.5 and before is retained. The epilogue is fully // unrolled and inlined. 
// template<class> struct TypeSink { typedef void type; }; template<class T> using TypeSinkT = typename TypeSink<T>::type; template<class T, class=void> struct IsEpilogueFunctorHeavy { static bool const value = false; }; template<class T> struct IsEpilogueFunctorHeavy<T, TypeSinkT< decltype( T::kIsHeavy ) > > { static bool const value = T::kIsHeavy; }; //////////////////////////////////////////////////////////////////////////////// /// Base class for epilogues defining warp-level template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpShape_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerIteration = 1 > class EpilogueBase { public: using Shape = Shape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using Padding = Padding_; /// Output layout is always row-major using Layout = layout::RowMajor; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename AccumulatorTile::Element; /// Number of warps using WarpCount = gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, kPartitionsK >; /// Use this to control the granularity of one epilogue 'iteration' static int const kFragmentsPerIteration = FragmentsPerIteration; public: /// Shared storage allocation needed by the epilogue struct SharedStorage { // // Type definitions // /// Element type of shared memory using Element = typename WarpTileIterator::Element; /// Tensor reference to shared memory allocation using TensorRef = typename WarpTileIterator::TensorRef; /// Layout of shared memory allocation using Layout = typename WarpTileIterator::Layout; /// Logical shape of the shared memory tile written to by all warps. 
using Shape = MatrixShape< WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK, WarpCount::kN * WarpTileIterator::Shape::kColumn >; /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape< (Shape::kRow + Padding::kRow) * kFragmentsPerIteration, Shape::kColumn + Padding::kColumn >; // // Data members // AlignedBuffer<Element, StorageShape::kCount> storage; // // Methods // /// Returns a pointer to the shared memory buffer CUTLASS_DEVICE Element *data() { return storage.data(); } /// Returns a tensor reference to the shared memory buffer CUTLASS_DEVICE TensorRef reference() { return TensorRef( storage.data(), Layout::packed({StorageShape::kRow, StorageShape::kColumn})); } }; protected: // // Data members // SharedStorage &shared_storage_; /// Stores a warp's fragment of accumulators to SMEM WarpTileIterator warp_tile_iterator_; public: /// Constructor CUTLASS_DEVICE EpilogueBase( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): shared_storage_(shared_storage), warp_tile_iterator_(shared_storage.reference(), lane_idx) { // Compute warp location within threadblock tile by mapping the warp_id to three coordinates: // // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN); int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN); int warp_m = warp_mn % WarpCount::kM; int warp_n = warp_mn / WarpCount::kM; MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n}; warp_tile_iterator_.add_tile_offset(warp_offset); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/epilogue_base.h/0
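The kIsHeavy detection used above is a small SFINAE idiom that is easy to exercise in isolation. The standalone sketch below repeats the trait and probes it with two made-up functors, LightFunctor and HeavyFunctor, which are illustrative names rather than CUTLASS types.

#include <cstdio>

// Same trait as in the header: detect a static kIsHeavy member, default to false.
template <class> struct TypeSink { typedef void type; };
template <class T> using TypeSinkT = typename TypeSink<T>::type;

template <class T, class = void>
struct IsEpilogueFunctorHeavy { static bool const value = false; };

template <class T>
struct IsEpilogueFunctorHeavy<T, TypeSinkT<decltype(T::kIsHeavy)>> {
  static bool const value = T::kIsHeavy;
};

struct LightFunctor {};                                    // no kIsHeavy member -> false
struct HeavyFunctor { static bool const kIsHeavy = true; };

int main() {
  std::printf("light: %d, heavy: %d\n",
              int(IsEpilogueFunctorHeavy<LightFunctor>::value),    // 0
              int(IsEpilogueFunctorHeavy<HeavyFunctor>::value));   // 1
  return 0;
}

Defaulting to false when a functor says nothing preserves the fully unrolled, inlined behavior described in the comment above, while functors that opt in via kIsHeavy trade a little call overhead for noticeably smaller generated code.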
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_base.h", "repo_id": "cutlass", "token_count": 2588 }
32
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Visitor tree compute operations for the CUTLASS 2x epilogue */ #pragma once #include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::threadblock { using namespace cute; using namespace detail; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // // N-nary Elementwise Compute Operation // ///////////////////////////////////////////////////////////////////////////////////////////////// template< template <class> class ComputeFn, class ElementOutput, class ElementCompute, FloatRoundStyle RoundStyle, class = void > struct VisitorCompute : VisitorImpl2x<> { using VisitorImpl2x<>::VisitorImpl2x; struct Callbacks : EmptyCallbacks { template <typename ElementAccumulator, typename... ElementInputs, int FragmentSize> CUTLASS_DEVICE Array<ElementOutput, FragmentSize> visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc, Array<ElementInputs, FragmentSize> const&... frg_inputs) { return transform_apply(cute::make_tuple(frg_inputs...), [&] (auto&& frg_input) { using ElementInput = typename cute::remove_cvref_t<decltype(frg_input)>::Element; using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>; ConvertInput convert_input{}; return convert_input(frg_input); }, [&] (auto&&... 
cvt_frg_inputs) { using ComputeOutput = ComputeFn<Array<ElementCompute, FragmentSize>>; using ConvertOutput = NumericArrayConverter<ElementOutput, ElementCompute, FragmentSize, RoundStyle>; ComputeOutput compute_output{}; ConvertOutput convert_output{}; return convert_output(compute_output(cvt_frg_inputs...)); } ); } }; template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { return Callbacks(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::epilogue::threadblock /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_compute.hpp/0
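The visit() callback above boils down to convert-in, apply ComputeFn, convert-out on register fragments. The host-side sketch below replays that pattern with concrete choices (half_t inputs, float compute, multiplies as the ComputeFn, fragment size 4) using the public Array, NumericArrayConverter, and multiplies utilities rather than the visitor machinery; the types, values, and the assumption that these headers suffice for a standalone translation unit are illustrative.

#include <cstdio>
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"

int main() {
  constexpr int kFragmentSize = 4;
  using ElementInput   = cutlass::half_t;
  using ElementCompute = float;
  using ElementOutput  = cutlass::half_t;

  // Two input fragments, as would arrive from child visitor nodes
  cutlass::Array<ElementInput, kFragmentSize> frg_a, frg_b;
  for (int i = 0; i < kFragmentSize; ++i) {
    frg_a[i] = ElementInput(float(i + 1));
    frg_b[i] = ElementInput(0.5f);
  }

  // Convert each input fragment to the compute type
  cutlass::NumericArrayConverter<ElementCompute, ElementInput, kFragmentSize> convert_input;
  auto a = convert_input(frg_a);
  auto b = convert_input(frg_b);

  // Apply the compute functor on compute-type fragments (multiplies as the ComputeFn)
  cutlass::multiplies<cutlass::Array<ElementCompute, kFragmentSize>> compute_output;
  auto c = compute_output(a, b);

  // Convert the result to the output type, as the visitor's ConvertOutput does
  cutlass::NumericArrayConverter<ElementOutput, ElementCompute, kFragmentSize> convert_output;
  auto frg_c = convert_output(c);

  for (int i = 0; i < kFragmentSize; ++i) {
    std::printf("%f ", float(frg_c[i]));
  }
  std::printf("\n");
  return 0;
}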
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_compute.hpp", "repo_id": "cutlass", "token_count": 1255 }
33
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from shared memory in epilogue. /// /// Satisfies: ReadableTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type int MaxAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8 > class SharedLoadIterator { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::TileShape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kMinAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8; static int const kAlignment = (MaxAlignment < kMinAlignment ? 
MaxAlignment : kMinAlignment); static int const kThreads = ThreadMap::kThreads; /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray< Element, ThreadMap::kElementsPerAccess, kAlignment>; /// Vector type used for SMEM loads using LoadType = AlignedArray< Element, const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess), const_min(16, kAlignment) >; static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements; private: // // Data members // /// Byte-level pointer uint8_t *byte_pointer_; /// Stride along adjacent rows int stride_; public: // // Methods // /// Constructor CUTLASS_DEVICE SharedLoadIterator( TensorRef ref, int thread_idx ): byte_pointer_(reinterpret_cast<uint8_t *>(ref.data())), stride_((ref.stride(0) * sizeof_bits<Element>::value) / 8) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); // Initialize pointer byte_pointer_ += thread_offset.row() * stride_ + thread_offset.column() * sizeof(AccessType) / kElementsPerAccess; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_DEVICE void add_tile_offset(TensorCoord const &offset) { byte_pointer_ += offset.row() * Shape::kRow * stride_ + offset.column() * Shape::kColumn * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { uint8_t const *byte_pointer = byte_pointer_ + row * ThreadMap::Delta::kRow * stride_ + group * ThreadMap::Delta::kGroup* stride_ + cluster * ThreadMap::Delta::kCluster * stride_ + pointer_offset * sizeof_bits<Element>::value / 8; int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag); LoadType const *memory_pointer = reinterpret_cast<LoadType const *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kLoadsPerAccess; ++v) { frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * kLoadsPerAccess + v]; } } } } } } /// Loads a fragment from memory CUTLASS_DEVICE void set_smem_base_address(Index address) { } /// Loads a fragment CUTLASS_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator.h/0
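The alignment and vectorization constants above determine how one logical access is split into at-most-128-bit shared-memory loads. The standalone sketch below works the same formulas for one assumed configuration (float elements, eight elements per access, a 256-element row); the values are illustrative, since the real ones come from the ThreadMap and TensorRef.

#include <cstdio>

int main() {
  // Assumed configuration: Element = float, ThreadMap::kElementsPerAccess = 8
  constexpr int sizeof_bits_element = 32;
  constexpr int elements_per_access = 8;

  // kMinAlignment / kAlignment: bytes covered by one access (32 B here);
  // MaxAlignment defaults to the same expression, so the min is the same value.
  constexpr int min_alignment = elements_per_access * sizeof_bits_element / 8;
  constexpr int alignment     = min_alignment;

  // LoadType is capped at 128 bits, so one 32 B access becomes two 16 B loads
  constexpr int load_elements    = (128 / sizeof_bits_element < elements_per_access)
                                     ? 128 / sizeof_bits_element : elements_per_access;   // 4
  constexpr int loads_per_access = elements_per_access / load_elements;                   // 2

  // Initial byte offset of a thread, as in the constructor:
  //   thread_offset.row() * stride_ + thread_offset.column() * sizeof(AccessType) / kElementsPerAccess
  int stride_bytes      = 256 * sizeof_bits_element / 8;   // stride_ for a 256-element SMEM row
  int row = 3, column = 16;                                // element coordinates from the ThreadMap
  int bytes_per_element = alignment / elements_per_access; // sizeof(AccessType) / kElementsPerAccess
  long initial_offset   = long(row) * stride_bytes + long(column) * bytes_per_element;

  std::printf("alignment = %d B, loads_per_access = %d, initial_offset = %ld B\n",
              alignment, loads_per_access, initial_offset);
  return 0;
}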
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator.h", "repo_id": "cutlass", "token_count": 2462 }
34
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue. These quantities assume a 'column-major' arrangement of TensorOp instructions, of which a row-oriented slice is visible per iteration. 
*/ #pragma once #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Policy details related to the epilogue template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape) typename ElementC, ///< Accumulator layout typename Layout ///< target shared memory layout > struct VoltaTensorOpPolicy; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape_ ///< shape of warp-level GEMM (concept: GemmShape) > struct VoltaTensorOpPolicy<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> { using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = half_t; using Layout = layout::RowMajor; /// Shape of one warp-levelinstruction using InstructionShape = gemm::GemmShape<16, 16, 4>; /// Number of mma operations performed for one 32x32x4 interleaved tile using MmaIterations = MatrixShape< InterleavedTileShape::kM / InstructionShape::kM, InterleavedTileShape::kN / InstructionShape::kN >; /// Number of 32x32x4 interleaved tiles performed to cover the warp-level GEMM shape using TileIterations = MatrixShape< WarpShape::kM / InterleavedTileShape::kM, WarpShape::kN / InterleavedTileShape::kN >; /// Number of accumulator elements owned by each thread per Mma static int const kElementsPerMma = 8; static int const kRowsPerIteration = 16; // // Hard-coded constants regarding Tensor Operations // /// Number of accumulator elements stored per memory instruction to shared memory static int const kElementsPerAccess = 4; /// Number of accesses performed per interleaved tile static int const kAccessesPerInterleavedTile = 4; /// Total number of iterations needed to cover the entire tile static int const kIterations = TileIterations::kRow * 2; // // Derived types // /// Array type for aligned memory accesses using AccessType = AlignedArray<ElementC, kElementsPerAccess>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< ElementC, kElementsPerAccess * kAccessesPerInterleavedTile * TileIterations::kColumn>; /// This is the complete warp-level accumulator tile. 
using AccumulatorTile = Array< ElementC, TileIterations::kCount * MmaIterations::kCount * kElementsPerMma>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > struct VoltaTensorOpPolicy<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> { using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = float; using Layout = layout::RowMajor; /// Shape of one warp-levelinstruction using InstructionShape = gemm::GemmShape<16, 16, 4>; /// Number of mma operations performed for one 32x32x4 interleaved tile using MmaIterations = MatrixShape< InterleavedTileShape::kM / InstructionShape::kM, InterleavedTileShape::kN / InstructionShape::kN >; /// Number of 32x32x4 interleaved tiles performed to cover the warp-level GEMM shape using TileIterations = MatrixShape< WarpShape::kM / InterleavedTileShape::kM, WarpShape::kN / InterleavedTileShape::kN >; /// Number of accumulator elements owned by each thread per Mma static int const kElementsPerMma = 8; static int const kRowsPerIteration = 16; // // Hard-coded constants regarding Tensor Operations // /// Number of accumulator elements stored per memory instruction to shared memory static int const kElementsPerAccess = 2; /// Number of accesses performed per interleaved tile static int const kAccessesPerInterleavedTile = 8; /// Number of rows per interleaved tile static int const kRowsPerMmaTile = 2; /// Total number of iterations needed to cover the entire tile static int const kIterations = TileIterations::kRow * MmaIterations::kRow; // // Derived types // /// Array type for aligned memory accesses using AccessType = AlignedArray<ElementC, kElementsPerAccess>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< ElementC, kElementsPerAccess * kAccessesPerInterleavedTile * TileIterations::kColumn>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = Array< ElementC, TileIterations::kCount * MmaIterations::kCount * kElementsPerMma>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/warp/volta_tensor_op_policy.h/0
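The derived iteration and fragment counts are easiest to sanity-check for one concrete warp shape. The sketch below instantiates the half_t specialization above for a 64x64 warp tile and restates the arithmetic as static_asserts; it assumes the CUTLASS include directory is on the include path and that the headers listed are enough for a standalone translation unit.

#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/warp/volta_tensor_op_policy.h"

using Policy = cutlass::epilogue::warp::VoltaTensorOpPolicy<
    cutlass::gemm::GemmShape<64, 64, 4>,   // warp-level GEMM shape
    cutlass::gemm::GemmShape<32, 32, 4>,   // interleaved tile shape
    cutlass::half_t,
    cutlass::layout::RowMajor>;

// A 64x64 warp tile covers a 2x2 grid of 32x32x4 interleaved tiles ...
static_assert(Policy::TileIterations::kRow == 2 && Policy::TileIterations::kColumn == 2,
              "2x2 interleaved tiles per warp tile");
// ... and each interleaved tile is a 2x2 grid of 16x16x4 MMA operations.
static_assert(Policy::MmaIterations::kRow == 2 && Policy::MmaIterations::kColumn == 2,
              "2x2 MMA operations per interleaved tile");
// kIterations = TileIterations::kRow * 2
static_assert(Policy::kIterations == 4, "four epilogue iterations");
// AccumulatorTile: 4 tiles * 4 MMAs * 8 accumulators per MMA = 128 elements per thread
static_assert(Policy::AccumulatorTile::kElements == 128, "accumulator tile size");
// Fragment: kElementsPerAccess (4) * kAccessesPerInterleavedTile (4) * TileIterations::kColumn (2)
static_assert(Policy::Fragment::kElements == 32, "fragment size per iterator access");

int main() { return 0; }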
{ "file_path": "cutlass/include/cutlass/epilogue/warp/volta_tensor_op_policy.h", "repo_id": "cutlass", "token_count": 2098 }
35
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/detail/dependent_false.hpp" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/detail/layout.hpp" #include "cutlass/numeric_types.h" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/transform/collective/sm90_wgmma_transpose.hpp" #include "cutlass/trace.h" #include "cute/arch/cluster_sm90.hpp" #include "cute/arch/copy_sm90.hpp" #include "cute/algorithm/functional.hpp" #include "cute/atom/mma_atom.hpp" #include "cute/algorithm/gemm.hpp" #include "cute/tensor_predicate.hpp" #include "cute/numeric/arithmetic_tuple.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { using namespace cute; ///////////////////////////////////////////////////////////////////////////////////////////////// // WarpSpecialized Mainloop that source A operand from registers template < int Stages, class ClusterShape, class KernelSchedule, class TileShape_, class ElementA_, class StrideA_, class ElementB_, class StrideB_, class TiledMma_, class GmemTiledCopyA_, class SmemLayoutAtomA_, class SmemCopyAtomA_, class TransformA_, class GmemTiledCopyB_, class SmemLayoutAtomB_, class SmemCopyAtomB_, class TransformB_> struct CollectiveMma< MainloopSm90TmaGmmaRmemAWarpSpecialized<Stages, ClusterShape, KernelSchedule>, TileShape_, ElementA_, StrideA_, ElementB_, StrideB_, TiledMma_, GmemTiledCopyA_, SmemLayoutAtomA_, SmemCopyAtomA_, TransformA_, GmemTiledCopyB_, SmemLayoutAtomB_, SmemCopyAtomB_, TransformB_> { // // Type Aliases // using DispatchPolicy = MainloopSm90TmaGmmaRmemAWarpSpecialized<Stages, ClusterShape, KernelSchedule>; using TileShape = TileShape_; using 
ElementA = ElementA_; using StrideA = StrideA_; using ElementB = ElementB_; using StrideB = StrideB_; using TiledMma = TiledMma_; using ElementAccumulator = typename TiledMma::ValTypeC; using GmemTiledCopyA = GmemTiledCopyA_; using GmemTiledCopyB = GmemTiledCopyB_; using SmemLayoutAtomA = SmemLayoutAtomA_; using SmemLayoutAtomB = SmemLayoutAtomB_; using SmemCopyAtomA = SmemCopyAtomA_; using SmemCopyAtomB = SmemCopyAtomB_; using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{})); // Swap and transpose A/B for A k-major layout and B mn-major layout since WGMMA is k-major only (e.g. tf32, Fp32, Int8, Fp8 WGMMA) static constexpr bool IsLayoutAkBmn = cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::RowMajor> && cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>; static constexpr bool IsInputSizeTwoBytes = sizeof(ElementA) == 2 && sizeof(ElementB) == 2; static constexpr bool SwapAB = !IsInputSizeTwoBytes && IsLayoutAkBmn; using InternalSmemLayoutAtomA = cute::conditional_t<!SwapAB, SmemLayoutAtomA, SmemLayoutAtomB>; using InternalSmemLayoutAtomB = cute::conditional_t<!SwapAB, SmemLayoutAtomB, SmemLayoutAtomA>; using InternalSmemCopyAtomA = cute::conditional_t<!SwapAB, SmemCopyAtomA, SmemCopyAtomB>; using InternalSmemCopyAtomB = cute::conditional_t<!SwapAB, SmemCopyAtomB, SmemCopyAtomA>; // TMA converts f32 input to tf32 when copying from GMEM to SMEM // For all other types, cast to size equivalent uint type to avoid any rounding by TMA. static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>; static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>; using ConvertedElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>; using ConvertedElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>; using InternalElementA = cute::conditional_t<!SwapAB, ConvertedElementA, ConvertedElementB>; using InternalElementB = cute::conditional_t<!SwapAB, ConvertedElementB, ConvertedElementA>; using InternalStrideA = cute::conditional_t<!SwapAB, StrideA, StrideB>; using InternalStrideB = cute::conditional_t<!SwapAB, StrideB, StrideA>; using TransformA = TransformA_; using TransformB = TransformB_; using ArchTag = typename DispatchPolicy::ArchTag; using MainloopPipeline = cutlass::PipelineTmaAsync<DispatchPolicy::Stages>; using PipelineState = cutlass::PipelineState<DispatchPolicy::Stages>; using PipelineParams = typename MainloopPipeline::Params; static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<0>(TileShape{}) % size<0>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<1>(TileShape{}) % size<0>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); // Tile along modes in a way that maximizes the TMA box size. 
using SmemLayoutA = decltype(tile_to_shape( InternalSmemLayoutAtomA{}, make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}), cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideA>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{})); using SmemLayoutB = decltype(tile_to_shape( InternalSmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}), cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{})); // If A mn-layout and B mn-layout, transposing B matrix since WGMMA is k-major only (e.g. tf32, fp32, fp8, int8). static constexpr bool IsLayoutAmnBmn = cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::ColumnMajor> && cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>; static constexpr bool TransposeB = !IsInputSizeTwoBytes && IsLayoutAmnBmn; using TransposeOperandB = decltype(cutlass::transform::collective::detail::make_transpose_operand_b( 0, 0, TiledMma{}, SmemLayoutB{}, InternalSmemLayoutAtomB{}, InternalElementB{}, cute::bool_constant<TransposeB>{})); static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more."); static_assert(not cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value && cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value, "MMA atom must source A from rmem and B operand from smem_desc for this mainloop."); static_assert(cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>, "GmemTiledCopy - invalid SM90 TMA copy atom specified."); static_assert(cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD> || cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>, "GmemTiledCopy - invalid SM90 TMA copy atom specified."); using GmmaSmemLayoutAtomB = decltype(transform::collective::detail::gmma_smem_transpose_or_passthrough< TransposeB, InternalSmemLayoutAtomB, InternalElementB>()); // SmemLayoutB for GMMA is different from SmemLayoutB for TMA if TransposeB using GmmaSmemLayoutB = decltype(tile_to_shape( GmmaSmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}), cute::conditional_t< ::cutlass::gemm::detail::is_major<0,InternalStrideB>(), Step<_2,_1,_3>, Step<_1,_2,_3>>{})); static_assert(!SwapAB || !TransposeB, "Cannot SwapAB and TransposeB at the same time."); static_assert(TransposeB xor (cute::is_same_v<SmemLayoutB, GmmaSmemLayoutB>), "Should be same layout if not TransposeB."); static_assert(!TransposeB || (cutlass::bits_to_bytes((size<1>(SmemLayoutB{}) * sizeof_bits<InternalElementB>::value))) == 128, "SmemLayoutB K must be 128bytes to be transposed."); static constexpr bool uses_universal_transposition() { if constexpr (TransposeB) { return transform::collective::detail::use_universal_transposition<InternalSmemLayoutAtomB, InternalElementB>(); } else { return false; } } static_assert(!uses_universal_transposition(), "Warp specialized ARF kernels have not supported universal B transposition yet."); static constexpr size_t SmemAlignmentA = cutlass::detail::alignment_for_swizzle(SmemLayoutA{}); static constexpr size_t SmemAlignmentB = cutlass::detail::alignment_for_swizzle(SmemLayoutB{}); static_assert(SmemAlignmentA >= 128 and SmemAlignmentB >= 128, "Require at least 128B alignment"); struct SharedStorage { struct TensorStorage : cute::aligned_struct<cute::max(SmemAlignmentA, 
SmemAlignmentB)> { cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>, SmemAlignmentA> smem_A; cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>, SmemAlignmentB> smem_B; } tensors; using PipelineStorage = typename MainloopPipeline::SharedStorage; PipelineStorage pipeline; }; using TensorStorage = typename SharedStorage::TensorStorage; using PipelineStorage = typename SharedStorage::PipelineStorage; // Host side kernel arguments struct Arguments { ElementA const* ptr_A = nullptr; StrideA dA{}; ElementB const* ptr_B = nullptr; StrideB dB{}; uint32_t mma_promotion_interval = 4; }; // Device side kernel params struct Params { // Assumption: StrideA is congruent with Problem_MK using TMA_A = decltype(make_tma_copy( GmemTiledCopyA{}, make_tensor(static_cast<InternalElementA const*>(nullptr), repeat_like(InternalStrideA{}, int32_t(0)), InternalStrideA{}), SmemLayoutA{}(_,_,cute::Int<0>{}), make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})), size<1>(ClusterShape{}))); // mcast along N mode for this M load, if any // Assumption: StrideB is congruent with Problem_NK using TMA_B = decltype(make_tma_copy( GmemTiledCopyB{}, make_tensor(static_cast<InternalElementB const*>(nullptr), repeat_like(InternalStrideB{}, int32_t(0)), InternalStrideB{}), SmemLayoutB{}(_,_,cute::Int<0>{}), make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})), size<0>(ClusterShape{}))); // mcast along M mode for this N load, if any TMA_A tma_load_a; TMA_B tma_load_b; }; // // Methods // template <class ProblemShape> static constexpr Params to_underlying_arguments(ProblemShape const& problem_shape, Arguments const& args, void* workspace) { (void) workspace; // Optionally append 1s until problem shape is rank-4 (MNKL), in case it is only rank-3 (MNK) auto problem_shape_MNKL = append<4>(problem_shape, 1); auto [M,N,K,L] = problem_shape_MNKL; if constexpr (SwapAB) { M = get<1>(problem_shape_MNKL); N = get<0>(problem_shape_MNKL); } InternalElementA const* ptr_A; InternalStrideA dA; InternalElementB const* ptr_B; InternalStrideB dB; if constexpr (not SwapAB) { ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_A); ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_B); dA = args.dA; dB = args.dB; } else { ptr_A = reinterpret_cast<InternalElementA const*>(args.ptr_B); ptr_B = reinterpret_cast<InternalElementB const*>(args.ptr_A); dA = args.dB; dB = args.dA; } Tensor tensor_a = make_tensor(ptr_A, make_layout(make_shape(M,K,L), dA)); Tensor tensor_b = make_tensor(ptr_B, make_layout(make_shape(N,K,L), dB)); typename Params::TMA_A tma_load_a = make_tma_copy( GmemTiledCopyA{}, tensor_a, SmemLayoutA{}(_,_,cute::Int<0>{}), make_shape(shape<0>(TileShape{}), shape<2>(TileShape{})), size<1>(ClusterShape{})); // mcast along N mode for this M load, if any typename Params::TMA_B tma_load_b = make_tma_copy( GmemTiledCopyB{}, tensor_b, SmemLayoutB{}(_,_,cute::Int<0>{}), make_shape(shape<1>(TileShape{}), shape<2>(TileShape{})), size<0>(ClusterShape{})); // mcast along M mode for this N load, if any return { tma_load_a, tma_load_b }; } template<class ProblemShape> CUTLASS_HOST_DEVICE static bool can_implement( ProblemShape const& problem_shape, [[maybe_unused]] Arguments const& args) { constexpr int tma_alignment_bits = 128; auto problem_shape_MNKL = append<4>(problem_shape, 1); auto [M,N,K,L] = problem_shape_MNKL; bool implementable = true; constexpr int min_tma_aligned_elements_A = tma_alignment_bits / cutlass::sizeof_bits<ElementA>::value; implementable = implementable && 
cutlass::detail::check_alignment<min_tma_aligned_elements_A>(cute::make_shape(M,K,L), StrideA{}); constexpr int min_tma_aligned_elements_B = tma_alignment_bits / cutlass::sizeof_bits<ElementB>::value; implementable = implementable && cutlass::detail::check_alignment<min_tma_aligned_elements_B>(cute::make_shape(N,K,L), StrideB{}); if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n"); } return implementable; } static constexpr int K_PIPE_MAX = DispatchPolicy::Stages; static constexpr uint32_t TmaTransactionBytes = cutlass::bits_to_bytes(size<0>(SmemLayoutA{}) * size<1>(SmemLayoutA{}) * static_cast<uint32_t>(sizeof_bits<InternalElementA>::value)) + cutlass::bits_to_bytes(size<0>(SmemLayoutB{}) * size<1>(SmemLayoutB{}) * static_cast<uint32_t>(sizeof_bits<InternalElementB>::value)) ; /// Issue Tma Descriptor Prefetch -- ideally from a single thread for best performance CUTLASS_DEVICE static void prefetch_tma_descriptors(Params const& mainloop_params) { cute::prefetch_tma_descriptor(mainloop_params.tma_load_a.get_tma_descriptor()); cute::prefetch_tma_descriptor(mainloop_params.tma_load_b.get_tma_descriptor()); } /// Set up the data needed by this collective for load and mma. /// Returns a tuple of tensors. The collective and the kernel layer have the contract /// Returned tuple must contain at least two elements, with the first two elements being: /// gA_mkl - The tma tensor, A after a local tile so it has shape (BLK_M,BLK_K,m,k,l) /// gB_nkl - The tma tensor, B after a local tile so it has shape (BLK_N,BLK_K,n,k,l) /// The rest of the tensors can be specified as needed by this collective. template <class ProblemShape_MNKL> CUTLASS_DEVICE auto load_init(ProblemShape_MNKL const& problem_shape_MNKL, Params const& mainloop_params) const { using X = Underscore; // Separate out problem shape for convenience auto [M,N,K,L] = problem_shape_MNKL; // TMA requires special handling of strides to deal with coord codomain mapping // Represent the full tensors -- get these from TMA Tensor mA_mkl = mainloop_params.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l) Tensor mB_nkl = mainloop_params.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l) // Make tiled views, defer the slice Tensor gA_mkl = local_tile(mA_mkl, TileShape{}, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) Tensor gB_nkl = local_tile(mB_nkl, TileShape{}, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) return cute::make_tuple(gA_mkl, gB_nkl); } /// Perform a collective-scoped matrix multiply-accumulate /// Producer Perspective template < class TensorA, class TensorB, class KTileIterator, class BlockCoord > CUTLASS_DEVICE void load( Params const& mainloop_params, MainloopPipeline pipeline, PipelineState smem_pipe_write, cute::tuple<TensorA, TensorB> const& load_inputs, BlockCoord const& blk_coord, KTileIterator k_tile_iter, int k_tile_count, int thread_idx, uint32_t block_rank_in_cluster, TensorStorage& shared_tensors) { int lane_predicate = cute::elect_one_sync(); if (lane_predicate) { Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE) Tensor sB = as_position_independent_swizzle_tensor(sB_); // (BLK_N,BLK_K,PIPE) // // Prepare the TMA loads for A and B // constexpr uint32_t 
cluster_shape_x = get<0>(ClusterShape()); uint2 cluster_local_block_id = {block_rank_in_cluster % cluster_shape_x, block_rank_in_cluster / cluster_shape_x}; Tensor gA_mkl = get<0>(load_inputs); Tensor gB_nkl = get<1>(load_inputs); auto block_tma_a = mainloop_params.tma_load_a.get_slice(cluster_local_block_id.y); auto block_tma_b = mainloop_params.tma_load_b.get_slice(cluster_local_block_id.x); // Partition the inputs based on the current block coordinates. auto [m_coord, n_coord, k_coord, l_coord] = blk_coord; Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) // Applies the mapping from block_tma_a Tensor tAgA = block_tma_a.partition_S(gA); // (TMA,TMA_M,TMA_K,k) Tensor tAsA = block_tma_a.partition_D(sA); // (TMA,TMA_M,TMA_K,PIPE) Tensor tBgB = block_tma_b.partition_S(gB); // (TMA,TMA_N,TMA_K,k) Tensor tBsB = block_tma_b.partition_D(sB); // (TMA,TMA_N,TMA_K,PIPE) uint16_t mcast_mask_a = 0; uint16_t mcast_mask_b = 0; // Issue TmaLoads // Maps the tile -> block, value if constexpr (cute::is_same_v<GmemTiledCopyA, SM90_TMA_LOAD_MULTICAST>) { auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id for (int n = 0; n < size<1>(block_layout); ++n) { mcast_mask_a |= (uint16_t(1) << block_layout(cluster_local_block_id.x,n,Int<0>{})); } } if constexpr (cute::is_same_v<GmemTiledCopyB, SM90_TMA_LOAD_MULTICAST>) { auto block_layout = Layout<typename DispatchPolicy::ClusterShape>{}; // (m,n) -> block_id for (int m = 0; m < size<0>(block_layout); ++m) { mcast_mask_b |= (uint16_t(1) << block_layout(m,cluster_local_block_id.y,Int<0>{})); } } // Mainloop CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > 0; --k_tile_count) { // LOCK smem_pipe_write for _writing_ pipeline.producer_acquire(smem_pipe_write); // // Copy gmem to smem for *k_tile_iter // using BarrierType = typename MainloopPipeline::ProducerBarrierType; BarrierType* tma_barrier = pipeline.producer_get_barrier(smem_pipe_write); int write_stage = smem_pipe_write.index(); copy(mainloop_params.tma_load_a.with(*tma_barrier, mcast_mask_a), tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage)); copy(mainloop_params.tma_load_b.with(*tma_barrier, mcast_mask_b), tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage)); ++k_tile_iter; // Advance smem_pipe_write ++smem_pipe_write; } } } /// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster CUTLASS_DEVICE void load_tail(MainloopPipeline pipeline, PipelineState smem_pipe_write) { int lane_predicate = cute::elect_one_sync(); // Issue the epilogue waits if (lane_predicate) { /* This helps avoid early exit of blocks in Cluster * Waits for all stages to either be released (all * Consumer UNLOCKs), or if the stage was never used * then would just be acquired since the phase was * still inverted from make_producer_start_state */ pipeline.producer_tail(smem_pipe_write); } } /// Perform a collective-scoped matrix multiply-accumulate /// Consumer Perspective template < class FrgTensorC > CUTLASS_DEVICE void mma(MainloopPipeline pipeline, PipelineState smem_pipe_read, FrgTensorC& accum, int k_tile_count, int thread_idx, TensorStorage& shared_tensors, Params const& mainloop_params) { static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident."); static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3."); static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3."); static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "InternalSmemLayoutAtomA must be 
rank 2."); static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "InternalSmemLayoutAtomB must be rank 2."); static_assert(!cute::is_void_v<InternalSmemCopyAtomA>, "SM90 GMMA mainloops must specify a non-void copy atom for smem sourced instructions."); static_assert(cute::is_void_v<InternalSmemCopyAtomB>, "SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions."); // Obtain warp index int warp_idx = canonical_warp_idx_sync(); [[maybe_unused]] int warp_group_thread_idx = thread_idx % 128; Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE) Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) Tensor sB = as_position_independent_swizzle_tensor(sB_); // (BLK_M,BLK_K,PIPE) // If TransposeB, GMMA will read from transposed B layout SMEM Tensor gmma_sB_position_dependent = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), GmmaSmemLayoutB{}); // (BLK_N,BLK_K,PIPE) Tensor gmma_sB = as_position_independent_swizzle_tensor(gmma_sB_position_dependent); // (BLK_N,BLK_K,PIPE) // // Define C accumulators and A/B partitioning // TiledMma tiled_mma; auto thread_mma = tiled_mma.get_thread_slice(thread_idx); // Allocate fragments and descriptors Tensor tCsA = thread_mma.partition_A(sA); Tensor tCrA = thread_mma.partition_fragment_A(sA(_,_,Int<0>{})); // (MMA,MMA_M,MMA_K,PIPE) Tensor tCsB = thread_mma.partition_B(gmma_sB_position_dependent); // (MMA,MMA_N,MMA_K,PIPE) Tensor tCrB = thread_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE) // // Copy Atom A retiling // auto smem_tiled_copy_A = make_tiled_copy_A(InternalSmemCopyAtomA{}, tiled_mma); auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx); Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K) Tensor tCsA_copy_view = smem_thr_copy_A.partition_S(sA); // (CPY,CPY_M,CPY_K) CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K CUTE_STATIC_ASSERT_V(size<1>(tCsA_copy_view) == size<1>(tCrA_copy_view)); // CPY_M CUTE_STATIC_ASSERT_V(size<2>(tCsA_copy_view) == size<2>(tCrA_copy_view)); // CPY_K CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE CUTE_STATIC_ASSERT_V(size<2>(tCrA) > _2{}, "RS loops require more than 2 MMA k-iterations for correctness."); // // PIPELINED MAIN LOOP // // We release buffers to producer warps(dma load) with some mmas in flight PipelineState smem_pipe_release = smem_pipe_read; tiled_mma.accumulate_ = GMMA::ScaleOut::Zero; TransposeOperandB transpose = cutlass::transform::collective::detail::make_transpose_operand_b( warp_idx, warp_group_thread_idx, tiled_mma, SmemLayoutB{}, InternalSmemLayoutAtomB{}, InternalElementB{}, cute::bool_constant<TransposeB>{}); warpgroup_fence_operand(accum); ConsumerToken barrier_token = {BarrierStatus::WaitAgain}; // first k tile { barrier_token = pipeline.consumer_try_wait(smem_pipe_read); pipeline.consumer_wait(smem_pipe_read, barrier_token); int read_stage = smem_pipe_read.index(); ++smem_pipe_read; 
barrier_token = pipeline.consumer_try_wait(smem_pipe_read); // copy smem->rmem for A operand copy(smem_tiled_copy_A, tCsA_copy_view(_,_,0,read_stage), tCrA_copy_view(_,_,0)); // transpose B operand in SMEM transpose(sB, gmma_sB, read_stage, 0); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) { copy(smem_tiled_copy_A, tCsA_copy_view(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1)); transpose.synchronize(k_block); transpose(sB, gmma_sB, read_stage, k_block + 1); warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum); if(k_block == 0) { tiled_mma.accumulate_ = GMMA::ScaleOut::One; } warpgroup_commit_batch(); } warpgroup_wait<2>(); warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum); warpgroup_commit_batch(); --k_tile_count; if(k_tile_count == 0) { return; } pipeline.consumer_wait(smem_pipe_read, barrier_token); copy(smem_tiled_copy_A, tCsA_copy_view(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0)); transpose(sB, gmma_sB, smem_pipe_read.index(), 0); warpgroup_wait<2>(); } warpgroup_fence_operand(accum); // Mainloop GMMAs CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > 1; --k_tile_count) { // // Compute on k_tile // int read_stage = smem_pipe_read.index(); ++smem_pipe_read; warpgroup_fence_operand(accum); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) { if (k_block == 0) { barrier_token = pipeline.consumer_try_wait(smem_pipe_read); } if (k_block == size<2>(tCrA) - 1) { pipeline.consumer_wait(smem_pipe_read, barrier_token); copy(smem_tiled_copy_A, tCsA_copy_view(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0)); // transpose B operand in SMEM transpose(sB, gmma_sB, smem_pipe_read.index(), 0); } else { copy(smem_tiled_copy_A, tCsA_copy_view(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1)); // transpose B operand in SMEM transpose.synchronize(k_block); // make transpose of k_block available transpose(sB, gmma_sB, read_stage, k_block + 1); } warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum); warpgroup_commit_batch(); warpgroup_wait<2>(); if (k_block == 1) { // release prior barrier pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it ++smem_pipe_release; } } warpgroup_fence_operand(accum); } warpgroup_fence_operand(accum); { // // Compute on k_tile // int read_stage = smem_pipe_read.index(); warpgroup_fence_operand(accum); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) { copy(smem_tiled_copy_A, tCsA_copy_view(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1)); transpose.synchronize(k_block); // make k_block transpose available transpose(sB, gmma_sB, read_stage, k_block + 1); warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; warpgroup_commit_batch(); warpgroup_wait<2>(); if (k_block == 1) { // release prior barrier pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it ++smem_pipe_release; } } warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) 
cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum); warpgroup_commit_batch(); } warpgroup_fence_operand(accum); } /// Perform a Consumer Epilogue to release all buffers CUTLASS_DEVICE void mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) { // Prologue GMMAs int prologue_mma_count = 1; k_tile_count -= prologue_mma_count; smem_pipe_release.advance(k_tile_count); // Wait on all GMMAs to complete warpgroup_wait<0>(); for (int count = 0; count < prologue_mma_count; ++count) { pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it ++smem_pipe_release; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::collective /////////////////////////////////////////////////////////////////////////////////////////////////
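A collective mainloop of this family is normally not instantiated by hand; in CUTLASS 3.x it is stamped out by the CollectiveBuilder and composed with a collective epilogue into a GemmUniversal kernel. The sketch below is illustrative only: the element types, layouts, alignments, tile and cluster shapes are assumptions chosen so that A is M-major and B is N-major with 4-byte operands (the case this RS mainloop handles via TransposeB), and whether the builder actually lowers to an RS, register-sourced-A mainloop depends on the operand types, layouts, and the requested kernel schedule.

// Hedged composition sketch (not part of the file above). All concrete type
// choices below are illustrative assumptions, not the only valid ones.
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"

using ElementA = cutlass::tfloat32_t;            // 4-byte A operand (assumed)
using ElementB = cutlass::tfloat32_t;            // 4-byte B operand (assumed)
using ElementC = float;                          // C/D operand (assumed)
using LayoutA  = cutlass::layout::ColumnMajor;   // M-major A (assumed)
using LayoutB  = cutlass::layout::RowMajor;      // N-major B (assumed)
using LayoutC  = cutlass::layout::ColumnMajor;

using TileShape    = cute::Shape<cute::_128, cute::_128, cute::_32>;  // CTA tile (assumed)
using ClusterShape = cute::Shape<cute::_1, cute::_1, cute::_1>;       // cluster shape (assumed)

// Epilogue first, so its shared-memory footprint can be carved out of the mainloop stages.
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
    cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
    TileShape, ClusterShape,
    cutlass::epilogue::collective::EpilogueTileAuto,
    float, float,
    ElementC, LayoutC, 4,
    ElementC, LayoutC, 4,
    cutlass::epilogue::collective::EpilogueScheduleAuto
  >::CollectiveOp;

using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
    cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
    ElementA, LayoutA, 4,
    ElementB, LayoutB, 4,
    float,
    TileShape, ClusterShape,
    cutlass::gemm::collective::StageCountAutoCarveout<
        static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
    cutlass::gemm::collective::KernelScheduleAuto
  >::CollectiveOp;

// Kernel and device-level adapter; the problem shape (M,N,K,L) is dynamic.
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
    cute::Shape<int, int, int, int>,
    CollectiveMainloop,
    CollectiveEpilogue>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;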
cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized.hpp/0
{ "file_path": "cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized.hpp", "repo_id": "cutlass", "token_count": 14823 }
36
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/sparse_gemm.h" #include "cutlass/gemm/kernel/default_gemm_sparse_with_visitor.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// /*! 
Sparse GEMM with visitor */ template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassSimt, /// Tag indicating architecture to tune for typename ArchTag_ = arch::Sm80, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename FusionCallbacks_ = typename cutlass::epilogue::threadblock::detail::EmptyCallbacks, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = typename threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator, /// Number of stages used in the pipelined epilogue int EpilogueStages = 1> class SparseGemmWithVisitor { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = LayoutC_; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using FusionCallbacks = FusionCallbacks_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; using MathOperator = Operator; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; /// Define the kernel using GemmKernel = typename kernel::DefaultSparseGemmWithVisitor< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, FusionCallbacks, ThreadblockSwizzle, kStages, Operator, EpilogueStages >::GemmKernel; using ElementE = typename GemmKernel::ElementE; using LayoutE = typename 
GemmKernel::LayoutE; static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value; static int const kSparse = GemmKernel::kSparse; static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits; static int const kElementsPerElementE = GemmKernel::kElementsPerElementE; /// Argument structure struct Arguments { // // Data members // GemmCoord problem_size; TensorRef<ElementA const, LayoutA> ref_A; TensorRef<ElementB const, LayoutB> ref_B; TensorRef<ElementE const, LayoutE> ref_E; typename FusionCallbacks::Arguments epilogue; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments(): problem_size(0, 0, 0) { } /// Constructs an Arguments structure CUTLASS_HOST_DEVICE Arguments( GemmCoord problem_size_, TensorRef<ElementA const, LayoutA> ref_A_, TensorRef<ElementB const, LayoutB> ref_B_, TensorRef<ElementE, LayoutE> ref_E_, typename FusionCallbacks::Arguments epilogue_ = typename FusionCallbacks::Arguments() ): problem_size(problem_size_), ref_A(ref_A_), ref_B(ref_B_), ref_E(ref_E_), epilogue(epilogue_) { } }; private: /// Kernel parameters object typename GemmKernel::Params params_; public: /// Constructs the GEMM. SparseGemmWithVisitor() { } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { Status status = GemmKernel::can_implement( args.problem_size, args.ref_A.non_const_ref(), args.ref_B.non_const_ref(), cutlass::TensorRef<ElementC, LayoutC>(), // It only matters that it's empty. cutlass::TensorRef<ElementC, LayoutC>(), // Same as above. args.ref_E.non_const_ref() ); if (status != Status::kSuccess) { return status; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; return bytes; } /// Initializes GEMM state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { constexpr int SplitKSlices = 1; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, SplitKSlices); // Initialize the Params structure params_ = typename GemmKernel::Params{ args.problem_size, grid_shape, args.ref_A.non_const_ref(), args.ref_B.non_const_ref(), args.ref_E.non_const_ref(), args.epilogue }; int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { params_.ref_A.reset(args.ref_A.non_const_ref().data()); params_.ref_B.reset(args.ref_B.non_const_ref().data()); params_.ref_E.reset(args.ref_E.non_const_ref().data()); params_.output_op = args.epilogue; return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(GemmKernel::kThreadCount, 1, 1); int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? 
Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
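The device-level wrapper above follows the standard CUTLASS 2.x host dispatch sequence: check support with can_implement(), build kernel parameters with initialize(), then launch with run(). The sketch below is a hedged illustration of that call flow only; SparseGemmT and run_sparse_gemm_with_visitor are placeholder names, and a real program would instantiate cutlass::gemm::device::SparseGemmWithVisitor with concrete element types, layouts, and a FusionCallbacks visitor tree, and populate Arguments with valid device-side tensor references.

// Hedged host-side call-flow sketch (not part of the file above).
#include <cuda_runtime.h>
#include "cutlass/cutlass.h"

template <typename SparseGemmT>
cutlass::Status run_sparse_gemm_with_visitor(
    typename SparseGemmT::Arguments const &args,
    cudaStream_t stream = nullptr) {

  // 1. Verify that the problem size and operand alignments are supported.
  cutlass::Status status = SparseGemmT::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // 2. Initialize kernel parameters. This operator reports a zero-byte
  //    workspace (get_workspace_size() returns 0), so nothing is allocated here.
  SparseGemmT gemm_op;
  status = gemm_op.initialize(args, /*workspace=*/nullptr, stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // 3. Launch the kernel on the supplied stream.
  return gemm_op.run(stream);
}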
cutlass/include/cutlass/gemm/device/gemm_sparse_with_visitor.h/0
{ "file_path": "cutlass/include/cutlass/gemm/device/gemm_sparse_with_visitor.h", "repo_id": "cutlass", "token_count": 3980 }
37
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Default configuration for a GEMM with fused epilogue visitor callbacks */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/kernel/default_gemm_universal.h" #include "cutlass/gemm/kernel/gemm_universal_with_visitor.h" #include "cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h" #include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Complex elementwise transformation on A operand ComplexTransform TransformA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Complex elementwise transformation on B operand ComplexTransform TransformB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Access granularity of C matrix in unit of elements int kAlignmentC, /// Element type for internal accumulation typename ElementAccumulator, /// Element type for epilogue computation typename ElementEpilogue, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename FusionCallbacks, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// Operation performed by GEMM typename Operator, /// Number of stages used in the pipelined epilogue int EpilogueStages = 1 > struct DefaultGemmWithVisitor { using GemmBase = typename DefaultGemmUniversal< ElementA_, LayoutA_, TransformA, kAlignmentA, ElementB_, LayoutB_, TransformB, kAlignmentB, ElementC_, LayoutC_, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, epilogue::thread::LinearCombination< ElementC_, kAlignmentC, ElementAccumulator, ElementEpilogue >, ThreadblockSwizzle, Stages, Operator >::GemmKernel; // Define epilogue using Epilogue = cutlass::epilogue::threadblock::EpilogueWithVisitorCallbacks< typename GemmBase::Epilogue, FusionCallbacks, EpilogueStages >; /// GemmWithVisitor without StreamkFeature member type template <class SwizzleT, class Enable = void> class SelectBase : public GemmWithEpilogueVisitor< typename GemmBase::Mma, Epilogue, SwizzleT> {}; /// GemmWIthVisitor with StreamkFeature member type template <class SwizzleT> class SelectBase<SwizzleT, typename SwizzleT::StreamkFeature> : public GemmWithEpilogueVisitorStreamk< typename GemmBase::Mma, Epilogue, SwizzleT> {}; /// Select kernel by ThreadblockSwizzle's support for StreamkFeature using GemmKernel = SelectBase<ThreadblockSwizzle>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass 
/////////////////////////////////////////////////////////////////////////////////////////////////
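DefaultGemmWithVisitor picks between the data-parallel and Stream-K kernel bases by detecting whether the threadblock swizzle exposes a nested StreamkFeature typedef, using a partial specialization of SelectBase. The standalone sketch below illustrates that detection idiom in isolation; every type name in it is invented for illustration and none of it comes from the file above.

// Hedged illustration of the StreamkFeature-based selection idiom.
#include <type_traits>

struct DataParallelKernel {};   // stands in for GemmWithEpilogueVisitor
struct StreamkKernel {};        // stands in for GemmWithEpilogueVisitorStreamk

struct IdentitySwizzle {};                            // no StreamkFeature member
struct StreamkSwizzle { using StreamkFeature = void; };

// Primary template: used when SwizzleT::StreamkFeature does not exist.
template <class SwizzleT, class Enable = void>
struct SelectKernel : DataParallelKernel {};

// Partial specialization: chosen when SwizzleT::StreamkFeature is a valid type.
template <class SwizzleT>
struct SelectKernel<SwizzleT, typename SwizzleT::StreamkFeature> : StreamkKernel {};

static_assert(std::is_base_of<DataParallelKernel, SelectKernel<IdentitySwizzle>>::value,
              "Swizzles without StreamkFeature select the data-parallel base");
static_assert(std::is_base_of<StreamkKernel, SelectKernel<StreamkSwizzle>>::value,
              "Swizzles exposing StreamkFeature select the Stream-K base");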
cutlass/include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h", "repo_id": "cutlass", "token_count": 1622 }
38
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Stream-K Gemm kernel compatible with fused epilogues that broadcast a bias vector over the MMA output. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/layout/layout.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/barrier.h" #include "cutlass/block_striped.h" #include "cutlass/semaphore.h" #include "cutlass/trace.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function bool IsSingleSource = Epilogue_::kIsSingleSource > struct GemmStreamkWithFusedEpilogue; // GemmStreamkWithFusedEpilogue with two sources template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > struct GemmStreamkWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, false> { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; /// The per-thread tile of raw accumulators using AccumulatorTile = typename Mma::FragmentC; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Workspace bytes per thread block static size_t const kWorkspaceBytesPerBlock = __NV_STD_MAX( kThreadCount * sizeof(AccumulatorTile), Epilogue::kWorkspaceBytesPerBlock); /// Block-striped reduction utility using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>; // // Structures // /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode{GemmUniversalMode::kGemm}; GemmCoord problem_size{}; int batch_count{1}; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor typename EpilogueOutputOp::Params epilogue{}; void const * ptr_A{nullptr}; void const * ptr_B{nullptr}; void const * ptr_C1{nullptr}; void const * ptr_C2{nullptr}; void * ptr_D{nullptr}; void * ptr_Vector; void * ptr_Tensor; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; int64_t batch_stride_C1{0}; int64_t batch_stride_C2{0}; int64_t batch_stride_D{0}; int64_t batch_stride_Vector{0}; int64_t batch_stride_Tensor{0}; typename LayoutA::Stride::Index lda{}; typename LayoutB::Stride::Index ldb{}; typename LayoutC::Stride::Index ldc1{}; typename LayoutC::Stride::Index ldc2{}; typename LayoutC::Stride::Index ldd{}; typename LayoutC::Stride::Index ldr{}; typename LayoutC::Stride::Index ldt{}; int avail_sms{-1}; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) // // Methods // /// Default Constructor Arguments() = default; /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C1, void const * ptr_C2, void * ptr_D, void * ptr_Vector, void * ptr_Tensor, int64_t batch_stride_A, 
int64_t batch_stride_B, int64_t batch_stride_C1, int64_t batch_stride_C2, int64_t batch_stride_D, int64_t batch_stride_Vector, int64_t batch_stride_Tensor, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldc1, typename LayoutC::Stride::Index ldc2, typename LayoutC::Stride::Index ldd, typename LayoutC::Stride::Index ldr, typename LayoutC::Stride::Index ldt, int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) : mode(mode), problem_size(problem_size), batch_count(batch_split), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C1(ptr_C1), ptr_C2(ptr_C2), ptr_D(ptr_D), ptr_Vector(ptr_Vector), ptr_Tensor(ptr_Tensor), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C1(batch_stride_C1), batch_stride_C2(batch_stride_C2), batch_stride_Vector(batch_stride_Vector), batch_stride_Tensor(batch_stride_Tensor), lda(lda), ldb(ldb), ldc1(ldc1), ldc2(ldc2), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms) { CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size); CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); CUTLASS_TRACE_HOST(" ldr: " << this->ldr); CUTLASS_TRACE_HOST(" ldt: " << this->ldt); CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms); } /// Returns arguments for the transposed problem Arguments transposed_problem() const { Arguments args(*this); std::swap(args.problem_size.m(), args.problem_size.n()); std::swap(args.ptr_A, args.ptr_B); std::swap(args.lda, args.ldb); std::swap(args.batch_stride_A, args.batch_stride_B); return args; } }; /// Parameters structure struct Params { public: // // Data members // void * ptr_A{nullptr}; void * ptr_B{nullptr}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorB::Params params_B{}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; GemmUniversalMode mode{GemmUniversalMode::kGemm}; ThreadblockSwizzle block_mapping{}; void *barrier_workspace{nullptr}; void *partials_workspace{nullptr}; typename EpilogueOutputOp::Params output_op{}; void * ptr_C1{nullptr}; void * ptr_C2{nullptr}; void * ptr_D{nullptr}; void * ptr_Tensor{nullptr}; void * ptr_Vector{nullptr}; typename Epilogue::OutputTileIterator::Params params_C1{}; typename Epilogue::OutputTileIterator::Params params_C2{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename Epilogue::TensorTileIterator::Params params_Tensor{}; int64_t batch_stride_C1{0}; int64_t batch_stride_C2{0}; int64_t batch_stride_D{0}; int64_t batch_stride_Vector{0}; int64_t batch_stride_Tensor{0}; typename LayoutC::Stride::Index ldr{}; protected: // // Host-only dispatch-utilities // /// Pad the given allocation size up to the nearest cache line static size_t cacheline_align_up(size_t size) { static const int CACHELINE_SIZE = 128; return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; } /// Get the workspace size needed for barrier size_t get_barrier_workspace_size() const { // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, // each reduction block needs its own synchronization flag. 
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); } /// Get the workspace size needed for intermediate partial sums size_t get_partials_workspace_size() const { int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); } public: // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : params_A(args.lda), params_B(args.ldb), params_C1(args.ldc1), params_C2(args.ldc2), params_D(args.ldd), params_Tensor(args.ldt), output_op(args.epilogue), mode(args.mode), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C1(const_cast<void *>(args.ptr_C1)), ptr_C2(const_cast<void *>(args.ptr_C2)), ptr_D(args.ptr_D), ptr_Vector(args.ptr_Vector), ldr(args.ldr), ptr_Tensor(args.ptr_Tensor), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C1(args.batch_stride_C1), batch_stride_C2(args.batch_stride_C2), batch_stride_D(args.batch_stride_D), batch_stride_Vector(args.batch_stride_Vector), batch_stride_Tensor(args.batch_stride_Tensor), barrier_workspace(nullptr), partials_workspace(nullptr) { CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params()"); CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); CUTLASS_TRACE_HOST(" ldr: " << this->ldr); CUTLASS_TRACE_HOST(" ldt: " << args.ldt); // Number of SMs to make available for StreamK decomposition int avail_sms = (args.avail_sms == -1) ? device_sms : fast_min(args.avail_sms, device_sms); CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms); // Initialize the block mapping structure block_mapping = ThreadblockSwizzle( args.mode, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.batch_count, sm_occupancy, device_sms, avail_sms, sizeof(ElementA), sizeof(ElementB), sizeof(ElementC), Epilogue::kAccumulatorFragments); } /// Returns the workspace size (in bytes) needed for these parameters size_t get_workspace_size() const { return get_barrier_workspace_size() + get_partials_workspace_size(); } /// Assign and initialize the specified workspace buffer. Assumes /// the memory allocated to workspace is at least as large as get_workspace_size(). 
Status init_workspace( void *workspace, cudaStream_t stream = nullptr) { uint8_t *ptr = static_cast<uint8_t*>(workspace); // Establish partials workspace partials_workspace = nullptr; size_t partials_workspace_bytes = get_partials_workspace_size(); if (partials_workspace_bytes > 0) { if (!workspace) { return Status::kErrorWorkspaceNull; } partials_workspace = ptr; ptr += partials_workspace_bytes; } // Establish barrier workspace barrier_workspace = nullptr; size_t barrier_workspace_bytes = get_barrier_workspace_size(); if (barrier_workspace_bytes > 0) { if (!workspace) { return Status::kErrorWorkspaceNull; } barrier_workspace = ptr; ptr += barrier_workspace_bytes; } // Zero-initialize barrier workspace if (barrier_workspace) { size_t barrier_workspace_bytes = get_barrier_workspace_size(); CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); cudaError_t result = cudaMemsetAsync( barrier_workspace, 0, barrier_workspace_bytes, stream); if (result != cudaSuccess) { CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); return Status::kErrorInternal; } } return Status::kSuccess; } /// Returns the GEMM volume in thread block tiles cutlass::gemm::GemmCoord get_tiled_shape() const { return block_mapping.tiled_shape(); } /// Returns the total number of thread blocks to launch int get_grid_blocks() const { dim3 grid_dims = get_grid_dims(); return grid_dims.x * grid_dims.y * grid_dims.z; } /// Returns the grid extents in thread blocks to launch dim3 get_grid_dims() const { return block_mapping.get_grid_dims(); } /// Lightweight update given a subset of arguments. Problem geometry is assumed /// to remain the same. CUTLASS_HOST_DEVICE void update(Arguments const &args) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C1 = const_cast<void *>(args.ptr_C1); ptr_C2 = const_cast<void *>(args.ptr_C2); ptr_D = args.ptr_D; ptr_Vector = args.ptr_Vector; ldr = args.ldr; ptr_Tensor = args.ptr_Tensor; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_C1 = args.batch_stride_C1; batch_stride_C2 = args.batch_stride_C2; batch_stride_D = args.batch_stride_D; batch_stride_Vector = args.batch_stride_Vector; batch_stride_Tensor = args.batch_stride_Tensor; output_op = args.epilogue; CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()"); CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); CUTLASS_TRACE_HOST(" ldr: " << this->ldr); } }; /// Tile work descriptor struct TileWorkDesc { /// The linear tile index int tile_idx; /// The location of this tile (in threadblock-tile coordinates) in the output matrix cutlass::gemm::GemmCoord tiled_coord; // The first global-scoped MAC-iteration this threadblock will perform for this tile int iter_begin; // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile int k_begin; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile int k_end; /// The number of remaining MAC-iterations this threadblock will perform for this tile int k_iters_remaining; // Whether this block will perform the first iteration of this tile CUTLASS_DEVICE bool tile_started() { return (k_begin == 0); } // Whether this block will perform the last iteration of this tile CUTLASS_DEVICE bool tile_finished(Params const &params) { return (k_end == params.block_mapping.problem_size.k()); } }; /// 
Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; protected: // // Data members // /// GEMM problem parameters Params const &params; /// Shared storage reference SharedStorage &shared_storage; /// ID within the threadblock int thread_idx; /// ID of warp int warp_idx; /// ID of each thread within a warp int lane_idx; /// Threadblock scoped epilogue Epilogue epilogue; public: // // Host dispatch API // /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()"); static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; bool isAMisaligned = false; bool isBMisaligned = false; bool isCMisaligned = false; if (platform::is_same<LayoutA, layout::RowMajor>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) { isAMisaligned = problem_size.m() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } if (platform::is_same<LayoutB, layout::RowMajor>::value) { isBMisaligned = problem_size.n() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } if (platform::is_same<LayoutC, layout::RowMajor>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) { isCMisaligned = problem_size.m() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } if (isAMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); return Status::kErrorMisalignedOperand; } if (isBMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); return Status::kErrorMisalignedOperand; } if (isCMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); return Status::kErrorMisalignedOperand; } CUTLASS_TRACE_HOST(" returning kSuccess"); return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } protected: // // Device-only utility methods // /// Iterator for fetching tile fragments from A CUTLASS_DEVICE typename Mma::IteratorA init_iterator_A( TileWorkDesc &tile_work, GemmUniversalMode mode) { // The input A matrix ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); // Update input pointers based on batched/array mode if (mode == GemmUniversalMode::kBatched) { ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; } if (mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()]; } int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; int m_end = params.block_mapping.problem_size.m(); return Mma::IteratorA( 
params.params_A, ptr_A, { m_end, tile_work.k_end }, threadIdx.x, { m_begin, tile_work.k_begin }); } /// Iterator for fetching tile fragments from B CUTLASS_DEVICE typename Mma::IteratorB init_iterator_B( TileWorkDesc &tile_work, GemmUniversalMode mode) { // The input B matrix ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // Update input pointers based on batched/array mode if (mode == GemmUniversalMode::kBatched) { ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; } if (mode == GemmUniversalMode::kArray) { ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()]; } int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; int n_end = params.block_mapping.problem_size.n(); return Mma::IteratorB( params.params_B, ptr_B, { tile_work.k_end, n_end }, threadIdx.x, { tile_work.k_begin, n_begin }); } CUTLASS_DEVICE void init_dp_tile_work( TileWorkDesc &tile_work, int tile_idx) { // The linear tile index tile_work.tile_idx = tile_idx; // The first global-scoped MAC-iteration this threadblock will perform for this tile tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); // The number of MAC-iterations this threadblock will perform for this tile tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_begin = 0; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_end = params.block_mapping.problem_size.k(); // The location of this tile (in threadblock-tile coordinates) in the output matrix tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); } CUTLASS_DEVICE void init_sk_tile_work( TileWorkDesc &tile_work, int tile_idx, int block_iter_begin, int block_iter_end) { // The linear tile index tile_work.tile_idx = tile_idx; // The first global-scoped MAC-iteration for this tile int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); // The first global-scoped MAC-iteration this threadblock will perform for this tile tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); // The first tile-scoped MAC-iteration this threadblock will perform for this tile int k_iter_begin = tile_work.iter_begin - tile_iter_begin; // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile int k_iter_end = block_iter_end - tile_iter_begin; // The number of MAC-iterations this threadblock will perform for this tile tile_work.k_iters_remaining = k_iter_end - k_iter_begin; // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_begin = k_iter_begin * Mma::Shape::kK; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_end = min( params.block_mapping.problem_size.k(), // extent of k domain (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment // The location of this tile (in threadblock-tile coordinates) in the output matrix tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); } /// Share accumulators with peers CUTLASS_DEVICE void share_accumulators( AccumulatorTile const &accumulator_tile, int block_idx, int first_block_idx) { AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace); int accum_tile_offset = first_block_idx * kThreadCount; if (block_idx == 
first_block_idx) { // First peer initializes the workspace partials BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); } else { // Subsequent peers atomically accumulate into the workspace partials if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) { // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); } else { // Turnstile reduction order: wait until the previous peer has written int wait_count = block_idx - first_block_idx; Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); } // Perform reduction in workspace BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); } // Signal our arrival Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); } /// Acquire accumulators from peers CUTLASS_DEVICE void acquire_accumulators( AccumulatorTile &accumulator_tile, int block_idx, int first_block_idx) { AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace); // Wait for arrival int num_carry_in = block_idx - first_block_idx; Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); // Load and add peer-partials accumulator tile to local accumulator tile int accum_tile_offset = first_block_idx * kThreadCount; BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); } /// Perform epilogue computations and output CUTLASS_DEVICE void do_epilogue( TileWorkDesc &tile_work, AccumulatorTile &accumulator_tile) { ElementC *ptr_C1 = static_cast<ElementC *>(params.ptr_C1); ElementC *ptr_C2 = static_cast<ElementC *>(params.ptr_C2); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor); // Define the reduction output pointer and move to the appropriate place typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector); // Update pointers for batched/array mode(s) if (params.mode == GemmUniversalMode::kBatched) { ptr_C1 += tile_work.tiled_coord.k() * params.batch_stride_C1; if (ptr_C2) { ptr_C2 += tile_work.tiled_coord.k() * params.batch_stride_C2; } ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D; if (ptr_Tensor) { ptr_Tensor = ReferenceFactory<typename Epilogue::ElementTensor>::add_pointer_offset( ptr_Tensor, tile_work.tiled_coord.k() * params.batch_stride_Tensor); } if (ptr_Vector) { ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector; } } if (params.mode == GemmUniversalMode::kArray) { ptr_C1 = static_cast<ElementC * const *>(params.ptr_C1)[tile_work.tiled_coord.k()]; if (ptr_C2) { ptr_C2 = static_cast<ElementC * const *>(params.ptr_C2)[tile_work.tiled_coord.k()]; } ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()]; if (ptr_Tensor) { ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[tile_work.tiled_coord.k()]; } if (ptr_Vector) { ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[tile_work.tiled_coord.k()]; } } // Location of this tile in item-coords MatrixCoord threadblock_item_begin( tile_work.tiled_coord.m() * Mma::Shape::kM, tile_work.tiled_coord.n() * Mma::Shape::kN ); // 
Tile iterator loading from residual1. typename Epilogue::OutputTileIterator iterator_C1( params.params_C1, ptr_C1, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Tile iterator loading from residual2. typename Epilogue::OutputTileIterator iterator_C2( params.params_C2, ptr_C2, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Additional tensor to load from typename Epilogue::TensorTileIterator tensor_iterator( params.params_Tensor, ptr_Tensor, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr; } // Execute the epilogue operator to update the destination tensor. epilogue( EpilogueOutputOp(params.output_op), ptr_Vector, iterator_D, accumulator_tile, iterator_C1, iterator_C2, tensor_iterator, params.block_mapping.problem_size.mn(), threadblock_item_begin); } CUTLASS_DEVICE void separate_reduction(int reduce_idx) { int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; // Reduce by sk-tile (every tile contributed to by one or more blocks) reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); // Wait for peers to complete int peer_idx_end = peer_idx_last + 1; int num_peers = peer_idx_end - peer_idx_begin; Barrier::wait_eq_reset( params.barrier_workspace, thread_idx, (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, num_peers); /// The location of this tile (in threadblock-tile coordinates) in the output matrix GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); // Location of this tile in item-coords MatrixCoord threadblock_item_begin( tiled_coord.m() * Mma::Shape::kM, tiled_coord.n() * Mma::Shape::kN ); ElementC *ptr_C1 = static_cast<ElementC *>(params.ptr_C1); ElementC *ptr_C2 = static_cast<ElementC *>(params.ptr_C2); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor); // Define the reduction output pointer and move to the appropriate place typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector); // Tile iterator loading from residual1. typename Epilogue::OutputTileIterator iterator_C1( params.params_C1, ptr_C1, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Tile iterator loading from residual2. typename Epilogue::OutputTileIterator iterator_C2( params.params_C2, ptr_C2, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Tile iterator writing to destination tensor. 
typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Additional tensor to load from typename Epilogue::TensorTileIterator tensor_iterator( params.params_Tensor, ptr_Tensor, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr; } // Execute the epilogue operator to update the destination tensor. epilogue.reduce( peer_idx_begin, peer_idx_end, reduce_fragment_idx, params.partials_workspace, EpilogueOutputOp(params.output_op), ptr_Vector, iterator_D, iterator_C1, iterator_C2, tensor_iterator, params.block_mapping.problem_size.mn(), threadblock_item_begin); } CUTLASS_DEVICE void process_tile( TileWorkDesc tile_work, int block_idx, int dp_start_block_idx, int block_iter_begin) { // Initialize input iterators typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); // Initialize accumulators AccumulatorTile accumulator_tile; accumulator_tile.clear(); // Initialize MMA abstraction Mma mma( shared_storage.main_loop, thread_idx, warp_idx, lane_idx); // Perform this tile's range of multiply-accumulate (MAC) iterations mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || (params.block_mapping.reduction_blocks == 0) || (block_idx >= dp_start_block_idx)) { // // Cooperative SK peer reduction or DP block // int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); if (!tile_work.tile_finished(params)) { // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace share_accumulators(accumulator_tile, block_idx, first_block_idx); } else { // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile if (!tile_work.tile_started()) { // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks acquire_accumulators(accumulator_tile, block_idx, first_block_idx); } do_epilogue(tile_work, accumulator_tile); } } else { // // Separate peer reduction // // Share accumulator partial sums with peer threadblock(s) through scratch workspace epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); // Signal arrival Barrier::arrive_range_inc( params.barrier_workspace, thread_idx, tile_work.tile_idx * Epilogue::kAccumulatorFragments, Epilogue::kAccumulatorFragments); } } /// Executes one GEMM CUTLASS_DEVICE void gemm() { // Initialize block's iteration range int tile_idx = 0; int block_iter_begin = 0; int block_iters_remaining = 0; int block_idx = params.block_mapping.get_block_idx(); int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; // Initialize tile work descriptor TileWorkDesc tile_work; bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); bool sk_block = 
(block_idx < sk_padding_start_block_idx); bool reduce_block = (block_idx >= reduce_start_block_idx) && (block_idx < grid_padding_start_block_idx) && (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); if (dp_block) { // This is a DP block int dp_block_idx = block_idx - dp_start_block_idx; int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles; // Blocks in first DP wave get configured number of tiles tile_idx = first_dp_tile + dp_block_idx; int tile_allottment = params.block_mapping.dp_first_wave_tiles; // Blocks in subsequent DP waves get 1 tile if (dp_block_idx >= params.block_mapping.avail_sms) { tile_allottment = 1; tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; } block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; init_dp_tile_work(tile_work, tile_idx); // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) if ((tile_idx < params.block_mapping.sk_tiles) || (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) { return; } } else if (sk_block) { // This is a SK block int block_iter_end; params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); block_iters_remaining = block_iter_end - block_iter_begin; tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); } else { if (reduce_block) { // This is a reduction threadblock int reduce_block_idx = block_idx - reduce_start_block_idx; separate_reduction(reduce_block_idx); } return; } // Iteration-processing loop body CUTLASS_PRAGMA_NO_UNROLL while (true) { // Perform this block's share of work for this tile process_tile( tile_work, block_idx, dp_start_block_idx, block_iter_begin); block_iters_remaining -= tile_work.k_iters_remaining; if (block_iters_remaining == 0) { break; } // Continue to next tile __syncthreads(); if (block_idx >= dp_start_block_idx) { // DP block consume their tiles at stride tile_idx += params.block_mapping.avail_sms; init_dp_tile_work(tile_work, tile_idx); } else { // SK blocks consume their tiles in backwards order tile_idx--; init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); } } } public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmStreamkWithFusedEpilogue op(params, shared_storage); op(); } // Constructor CUTLASS_DEVICE GemmStreamkWithFusedEpilogue( Params const &params, SharedStorage &shared_storage) : params(params), shared_storage(shared_storage), thread_idx(threadIdx.x), warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code lane_idx(threadIdx.x % 32), epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx) {} /// Executes one GEMM CUTLASS_DEVICE void operator()() { // Generic SK code path gemm(); } }; // GemmStreamkWithFusedEpilogue with one source template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > struct GemmStreamkWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, true> { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; /// The per-thread tile of raw accumulators using AccumulatorTile = typename Mma::FragmentC; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Workspace bytes per thread block static size_t const kWorkspaceBytesPerBlock = __NV_STD_MAX( kThreadCount * sizeof(AccumulatorTile), Epilogue::kWorkspaceBytesPerBlock); /// Block-striped reduction utility using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>; // // Structures // /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode{GemmUniversalMode::kGemm}; GemmCoord problem_size{}; int batch_count{1}; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor typename EpilogueOutputOp::Params epilogue{}; void const * ptr_A{nullptr}; void const * ptr_B{nullptr}; void const * ptr_C{nullptr}; void * ptr_D{nullptr}; void * ptr_Vector{nullptr}; void * ptr_Tensor{nullptr}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; int64_t batch_stride_C{0}; int64_t batch_stride_D{0}; int64_t batch_stride_Vector{0}; int64_t batch_stride_Tensor{0}; typename LayoutA::Stride::Index lda{}; typename LayoutB::Stride::Index ldb{}; typename LayoutC::Stride::Index ldc{}; typename LayoutC::Stride::Index ldd{}; typename LayoutC::Stride::Index ldr{}; typename LayoutC::Stride::Index ldt{}; int avail_sms{-1}; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) // // Methods // /// Default Constructor Arguments() = default; /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, void * ptr_Vector, void * ptr_Tensor, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, int64_t batch_stride_Vector, 
int64_t batch_stride_Tensor, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, typename LayoutC::Stride::Index ldr, typename LayoutC::Stride::Index ldt, int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) : mode(mode), problem_size(problem_size), batch_count(batch_split), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_Vector(ptr_Vector), ptr_Tensor(ptr_Tensor), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_Vector(batch_stride_Vector), batch_stride_Tensor(batch_stride_Tensor), lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms) { CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size); CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); CUTLASS_TRACE_HOST(" ldr: " << this->ldr); CUTLASS_TRACE_HOST(" ldt: " << this->ldt); CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms); } /// Returns arguments for the transposed problem Arguments transposed_problem() const { Arguments args(*this); std::swap(args.problem_size.m(), args.problem_size.n()); std::swap(args.ptr_A, args.ptr_B); std::swap(args.lda, args.ldb); std::swap(args.batch_stride_A, args.batch_stride_B); return args; } }; /// Parameters structure struct Params { public: // // Data members // void * ptr_A{nullptr}; void * ptr_B{nullptr}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorB::Params params_B{}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; GemmUniversalMode mode{GemmUniversalMode::kGemm}; ThreadblockSwizzle block_mapping{}; void *barrier_workspace{nullptr}; void *partials_workspace{nullptr}; typename EpilogueOutputOp::Params output_op{}; void * ptr_C{nullptr}; void * ptr_D{nullptr}; void * ptr_Tensor{nullptr}; void * ptr_Vector{nullptr}; typename Epilogue::OutputTileIterator::Params params_C{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename Epilogue::TensorTileIterator::Params params_Tensor{}; int64_t batch_stride_C{0}; int64_t batch_stride_D{0}; int64_t batch_stride_Vector{0}; int64_t batch_stride_Tensor{0}; typename LayoutC::Stride::Index ldr{}; protected: // // Host-only dispatch-utilities // /// Pad the given allocation size up to the nearest cache line static size_t cacheline_align_up(size_t size) { static const int CACHELINE_SIZE = 128; return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; } /// Get the workspace size needed for barrier size_t get_barrier_workspace_size() const { // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, // each reduction block needs its own synchronization flag. 
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); } /// Get the workspace size needed for intermediate partial sums size_t get_partials_workspace_size() const { int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); } public: // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : params_A(args.lda), params_B(args.ldb), params_C(args.ldc), params_D(args.ldd), params_Tensor(args.ldt), output_op(args.epilogue), mode(args.mode), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(args.ptr_D), ptr_Vector(args.ptr_Vector), ldr(args.ldr), ptr_Tensor(args.ptr_Tensor), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C(args.batch_stride_C), batch_stride_D(args.batch_stride_D), batch_stride_Vector(args.batch_stride_Vector), batch_stride_Tensor(args.batch_stride_Tensor), barrier_workspace(nullptr), partials_workspace(nullptr) { CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params()"); CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); CUTLASS_TRACE_HOST(" ldr: " << this->ldr); CUTLASS_TRACE_HOST(" ldt: " << args.ldt); // Number of SMs to make available for StreamK decomposition int avail_sms = (args.avail_sms == -1) ? device_sms : fast_min(args.avail_sms, device_sms); CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms); // Initialize the block mapping structure block_mapping = ThreadblockSwizzle( args.mode, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.batch_count, sm_occupancy, device_sms, avail_sms, sizeof(ElementA), sizeof(ElementB), sizeof(ElementC), Epilogue::kAccumulatorFragments); } /// Returns the workspace size (in bytes) needed for these parameters size_t get_workspace_size() const { return get_barrier_workspace_size() + get_partials_workspace_size(); } /// Assign and initialize the specified workspace buffer. Assumes /// the memory allocated to workspace is at least as large as get_workspace_size(). 
Status init_workspace( void *workspace, cudaStream_t stream = nullptr) { uint8_t *ptr = static_cast<uint8_t*>(workspace); // Establish partials workspace partials_workspace = nullptr; size_t partials_workspace_bytes = get_partials_workspace_size(); if (partials_workspace_bytes > 0) { if (!workspace) { return Status::kErrorWorkspaceNull; } partials_workspace = ptr; ptr += partials_workspace_bytes; } // Establish barrier workspace barrier_workspace = nullptr; size_t barrier_workspace_bytes = get_barrier_workspace_size(); if (barrier_workspace_bytes > 0) { if (!workspace) { return Status::kErrorWorkspaceNull; } barrier_workspace = ptr; ptr += barrier_workspace_bytes; } // Zero-initialize barrier workspace if (barrier_workspace) { size_t barrier_workspace_bytes = get_barrier_workspace_size(); CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); cudaError_t result = cudaMemsetAsync( barrier_workspace, 0, barrier_workspace_bytes, stream); if (result != cudaSuccess) { CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); return Status::kErrorInternal; } } return Status::kSuccess; } /// Returns the GEMM volume in thread block tiles cutlass::gemm::GemmCoord get_tiled_shape() const { return block_mapping.tiled_shape(); } /// Returns the total number of thread blocks to launch int get_grid_blocks() const { dim3 grid_dims = get_grid_dims(); return grid_dims.x * grid_dims.y * grid_dims.z; } /// Returns the grid extents in thread blocks to launch dim3 get_grid_dims() const { return block_mapping.get_grid_dims(); } /// Lightweight update given a subset of arguments. Problem geometry is assumed /// to remain the same. CUTLASS_HOST_DEVICE void update(Arguments const &args) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; ptr_Vector = args.ptr_Vector; ldr = args.ldr; ptr_Tensor = args.ptr_Tensor; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_C = args.batch_stride_C; batch_stride_D = args.batch_stride_D; batch_stride_Vector = args.batch_stride_Vector; batch_stride_Tensor = args.batch_stride_Tensor; output_op = args.epilogue; CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()"); CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); CUTLASS_TRACE_HOST(" ldr: " << this->ldr); } }; /// Tile work descriptor struct TileWorkDesc { /// The linear tile index int tile_idx; /// The location of this tile (in threadblock-tile coordinates) in the output matrix cutlass::gemm::GemmCoord tiled_coord; // The first global-scoped MAC-iteration this threadblock will perform for this tile int iter_begin; // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile int k_begin; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile int k_end; /// The number of remaining MAC-iterations this threadblock will perform for this tile int k_iters_remaining; // Whether this block will perform the first iteration of this tile CUTLASS_DEVICE bool tile_started() { return (k_begin == 0); } // Whether this block will perform the last iteration of this tile CUTLASS_DEVICE bool tile_finished(Params const &params) { return (k_end == params.block_mapping.problem_size.k()); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage 
main_loop; typename Epilogue::SharedStorage epilogue; }; protected: // // Data members // /// GEMM problem parameters Params const &params; /// Shared storage reference SharedStorage &shared_storage; /// ID within the threadblock int thread_idx; /// ID of warp int warp_idx; /// ID of each thread within a warp int lane_idx; /// Threadblock scoped epilogue Epilogue epilogue; public: // // Host dispatch API // /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()"); static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; bool isAMisaligned = false; bool isBMisaligned = false; bool isCMisaligned = false; if (platform::is_same<LayoutA, layout::RowMajor>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) { isAMisaligned = problem_size.m() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } if (platform::is_same<LayoutB, layout::RowMajor>::value) { isBMisaligned = problem_size.n() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } if (platform::is_same<LayoutC, layout::RowMajor>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) { isCMisaligned = problem_size.m() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } if (isAMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); return Status::kErrorMisalignedOperand; } if (isBMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); return Status::kErrorMisalignedOperand; } if (isCMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); return Status::kErrorMisalignedOperand; } CUTLASS_TRACE_HOST(" returning kSuccess"); return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } protected: // // Device-only utility methods // /// Iterator for fetching tile fragments from A CUTLASS_DEVICE typename Mma::IteratorA init_iterator_A( TileWorkDesc &tile_work, GemmUniversalMode mode) { // The input A matrix ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); // Update input pointers based on batched/array mode if (mode == GemmUniversalMode::kBatched) { ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; } if (mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()]; } int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; int m_end = params.block_mapping.problem_size.m(); return Mma::IteratorA( params.params_A, ptr_A, { m_end, tile_work.k_end }, threadIdx.x, { m_begin, 
tile_work.k_begin }); } /// Iterator for fetching tile fragments from B CUTLASS_DEVICE typename Mma::IteratorB init_iterator_B( TileWorkDesc &tile_work, GemmUniversalMode mode) { // The input B matrix ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // Update input pointers based on batched/array mode if (mode == GemmUniversalMode::kBatched) { ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; } if (mode == GemmUniversalMode::kArray) { ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()]; } int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; int n_end = params.block_mapping.problem_size.n(); return Mma::IteratorB( params.params_B, ptr_B, { tile_work.k_end, n_end }, threadIdx.x, { tile_work.k_begin, n_begin }); } CUTLASS_DEVICE void init_dp_tile_work( TileWorkDesc &tile_work, int tile_idx) { // The linear tile index tile_work.tile_idx = tile_idx; // The first global-scoped MAC-iteration this threadblock will perform for this tile tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); // The number of MAC-iterations this threadblock will perform for this tile tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_begin = 0; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_end = params.block_mapping.problem_size.k(); // The location of this tile (in threadblock-tile coordinates) in the output matrix tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); } CUTLASS_DEVICE void init_sk_tile_work( TileWorkDesc &tile_work, int tile_idx, int block_iter_begin, int block_iter_end) { // The linear tile index tile_work.tile_idx = tile_idx; // The first global-scoped MAC-iteration for this tile int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); // The first global-scoped MAC-iteration this threadblock will perform for this tile tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); // The first tile-scoped MAC-iteration this threadblock will perform for this tile int k_iter_begin = tile_work.iter_begin - tile_iter_begin; // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile int k_iter_end = block_iter_end - tile_iter_begin; // The number of MAC-iterations this threadblock will perform for this tile tile_work.k_iters_remaining = k_iter_end - k_iter_begin; // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_begin = k_iter_begin * Mma::Shape::kK; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_end = min( params.block_mapping.problem_size.k(), // extent of k domain (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment // The location of this tile (in threadblock-tile coordinates) in the output matrix tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); } /// Share accumulators with peers CUTLASS_DEVICE void share_accumulators( AccumulatorTile const &accumulator_tile, int block_idx, int first_block_idx) { AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace); int accum_tile_offset = first_block_idx * kThreadCount; if (block_idx == first_block_idx) { // First peer initializes the workspace partials 
BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); } else { // Subsequent peers atomically accumulate into the workspace partials if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) { // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); } else { // Turnstile reduction order: wait until the previous peer has written int wait_count = block_idx - first_block_idx; Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); } // Perform reduction in workspace BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); } // Signal our arrival Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); } /// Acquire accumulators from peers CUTLASS_DEVICE void acquire_accumulators( AccumulatorTile &accumulator_tile, int block_idx, int first_block_idx) { AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace); // Wait for arrival int num_carry_in = block_idx - first_block_idx; Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); // Load and add peer-partials accumulator tile to local accumulator tile int accum_tile_offset = first_block_idx * kThreadCount; BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); } /// Perform epilogue computations and output CUTLASS_DEVICE void do_epilogue( TileWorkDesc &tile_work, AccumulatorTile &accumulator_tile) { ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor); // Define the reduction output pointer and move to the appropriate place typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector); // Update pointers for batched/array mode(s) if (params.mode == GemmUniversalMode::kBatched) { ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C; ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D; if (ptr_Tensor) { ptr_Tensor = ReferenceFactory<typename Epilogue::ElementTensor>::add_pointer_offset( ptr_Tensor, tile_work.tiled_coord.k() * params.batch_stride_Tensor); } if (ptr_Vector) { ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector; } } if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tile_work.tiled_coord.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()]; if (ptr_Tensor) { ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[tile_work.tiled_coord.k()]; } if (ptr_Vector) { ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[tile_work.tiled_coord.k()]; } } // Location of this tile in item-coords MatrixCoord threadblock_item_begin( tile_work.tiled_coord.m() * Mma::Shape::kM, tile_work.tiled_coord.n() * Mma::Shape::kN ); // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Tile iterator writing to destination tensor. 
typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Additional tensor to load from typename Epilogue::TensorTileIterator tensor_iterator( params.params_Tensor, ptr_Tensor, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr; } // Execute the epilogue operator to update the destination tensor. epilogue( EpilogueOutputOp(params.output_op), ptr_Vector, iterator_D, accumulator_tile, iterator_C, tensor_iterator, params.block_mapping.problem_size.mn(), threadblock_item_begin); } CUTLASS_DEVICE void separate_reduction(int reduce_idx) { int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; // Reduce by sk-tile (every tile contributed to by one or more blocks) reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); // Wait for peers to complete int peer_idx_end = peer_idx_last + 1; int num_peers = peer_idx_end - peer_idx_begin; Barrier::wait_eq_reset( params.barrier_workspace, thread_idx, (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, num_peers); /// The location of this tile (in threadblock-tile coordinates) in the output matrix GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); // Location of this tile in item-coords MatrixCoord threadblock_item_begin( tiled_coord.m() * Mma::Shape::kM, tiled_coord.n() * Mma::Shape::kN ); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor); // Define the reduction output pointer and move to the appropriate place typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector); // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Additional tensor to load from typename Epilogue::TensorTileIterator tensor_iterator( params.params_Tensor, ptr_Tensor, params.block_mapping.problem_size.mn(), thread_idx, threadblock_item_begin); // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr; } // Execute the epilogue operator to update the destination tensor. 
epilogue.reduce( peer_idx_begin, peer_idx_end, reduce_fragment_idx, params.partials_workspace, EpilogueOutputOp(params.output_op), ptr_Vector, iterator_D, iterator_C, tensor_iterator, params.block_mapping.problem_size.mn(), threadblock_item_begin); } CUTLASS_DEVICE void process_tile( TileWorkDesc tile_work, int block_idx, int dp_start_block_idx, int block_iter_begin) { // Initialize input iterators typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); // Initialize accumulators AccumulatorTile accumulator_tile; accumulator_tile.clear(); // Initialize MMA abstraction Mma mma( shared_storage.main_loop, thread_idx, warp_idx, lane_idx); // Perform this tile's range of multiply-accumulate (MAC) iterations mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || (params.block_mapping.reduction_blocks == 0) || (block_idx >= dp_start_block_idx)) { // // Cooperative SK peer reduction or DP block // int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); if (!tile_work.tile_finished(params)) { // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace share_accumulators(accumulator_tile, block_idx, first_block_idx); } else { // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile if (!tile_work.tile_started()) { // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks acquire_accumulators(accumulator_tile, block_idx, first_block_idx); } do_epilogue(tile_work, accumulator_tile); } } else { // // Separate peer reduction // // Share accumulator partial sums with peer threadblock(s) through scratch workspace epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); // Signal arrival Barrier::arrive_range_inc( params.barrier_workspace, thread_idx, tile_work.tile_idx * Epilogue::kAccumulatorFragments, Epilogue::kAccumulatorFragments); } } /// Executes one GEMM CUTLASS_DEVICE void gemm() { // Initialize block's iteration range int tile_idx = 0; int block_iter_begin = 0; int block_iters_remaining = 0; int block_idx = params.block_mapping.get_block_idx(); int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; // Initialize tile work descriptor TileWorkDesc tile_work; bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); bool sk_block = (block_idx < sk_padding_start_block_idx); bool reduce_block = (block_idx >= reduce_start_block_idx) && (block_idx < grid_padding_start_block_idx) && (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); if (dp_block) { // This is a DP block int dp_block_idx = block_idx - dp_start_block_idx; int first_dp_tile = (params.block_mapping.cohort_raster) ? 
0 : params.block_mapping.sk_tiles; // Blocks in first DP wave get configured number of tiles tile_idx = first_dp_tile + dp_block_idx; int tile_allottment = params.block_mapping.dp_first_wave_tiles; // Blocks in subsequent DP waves get 1 tile if (dp_block_idx >= params.block_mapping.avail_sms) { tile_allottment = 1; tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; } block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; init_dp_tile_work(tile_work, tile_idx); // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) if ((tile_idx < params.block_mapping.sk_tiles) || (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) { return; } } else if (sk_block) { // This is a SK block int block_iter_end; params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); block_iters_remaining = block_iter_end - block_iter_begin; tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); } else { if (reduce_block) { // This is a reduction threadblock int reduce_block_idx = block_idx - reduce_start_block_idx; separate_reduction(reduce_block_idx); } return; } // Iteration-processing loop body CUTLASS_PRAGMA_NO_UNROLL while (true) { // Perform this block's share of work for this tile process_tile( tile_work, block_idx, dp_start_block_idx, block_iter_begin); block_iters_remaining -= tile_work.k_iters_remaining; if (block_iters_remaining == 0) { break; } // Continue to next tile __syncthreads(); if (block_idx >= dp_start_block_idx) { // DP block consume their tiles at stride tile_idx += params.block_mapping.avail_sms; init_dp_tile_work(tile_work, tile_idx); } else { // SK blocks consume their tiles in backwards order tile_idx--; init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); } } } public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmStreamkWithFusedEpilogue op(params, shared_storage); op(); } // Constructor CUTLASS_DEVICE GemmStreamkWithFusedEpilogue( Params const &params, SharedStorage &shared_storage) : params(params), shared_storage(shared_storage), thread_idx(threadIdx.x), warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code lane_idx(threadIdx.x % 32), epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx) {} /// Executes one GEMM CUTLASS_DEVICE void operator()() { // Generic SK code path gemm(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
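//
// The stream-K scheduling above hinges on init_sk_tile_work(): each SK block
// owns a contiguous range of global MAC-iterations and converts it, per output
// tile, into a [k_begin, k_end) slice of the k-domain. The following
// self-contained, host-side sketch reproduces only that arithmetic for a toy
// problem. The block/tile counts and the even iteration split are illustrative
// assumptions, not the heuristics used by the ThreadblockSwizzle, and the real
// kernel walks its tiles in reverse order and exchanges partial sums through
// the barrier/partials workspaces rather than printing them.
//
#include <algorithm>
#include <cstdio>

int main() {
  int const kK             = 32;                          // threadblock extent in k
  int const problem_k      = 320;                         // GEMM k extent
  int const iters_per_tile = (problem_k + kK - 1) / kK;   // 10 MAC-iterations per tile
  int const num_sk_tiles   = 3;                           // tiles covered stream-K style
  int const num_sk_blocks  = 4;                           // cooperating SK blocks

  int const total_iters = num_sk_tiles * iters_per_tile;
  for (int block = 0; block < num_sk_blocks; ++block) {
    // Evenly split the global iteration space [0, total_iters) across blocks.
    int block_iter_begin = (total_iters * block) / num_sk_blocks;
    int block_iter_end   = (total_iters * (block + 1)) / num_sk_blocks;

    for (int tile = block_iter_begin / iters_per_tile;
         tile * iters_per_tile < block_iter_end; ++tile) {
      int tile_iter_begin = tile * iters_per_tile;
      // Clip the block's iteration range to this tile and convert iterations to
      // k-domain indices (init_sk_tile_work() does the equivalent one tile at a
      // time, walking backwards from the block's last iteration).
      int k_iter_begin = std::max(block_iter_begin, tile_iter_begin) - tile_iter_begin;
      int k_iter_end   = std::min(block_iter_end, tile_iter_begin + iters_per_tile) - tile_iter_begin;
      int k_begin      = k_iter_begin * kK;
      int k_end        = std::min(problem_k, k_iter_end * kK);

      // tile_started()/tile_finished() analogues decide who writes the epilogue.
      std::printf("block %d: tile %d  k=[%3d,%3d)  started=%d finished=%d\n",
                  block, tile, k_begin, k_end,
                  int(k_begin == 0), int(k_end == problem_k));
    }
  }
  return 0;
}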
// Source file: cutlass/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Problem visitor for grouped Rank2K operations. This problem visitor is specialized for Rank2K operations, for which matrix C is upper/lower triangular. Using a problem visitor designed for GEMMs for Rank2K problems is inefficient because threadblocks will be frequently assigned to tiles that exit early (e.g., due to being assigned to a tile in the upper-triangular portion of a lower-triangular problem). This can lead to load imbalance among threadblocks, as the GEMM-based scheduler assigns all threadblocks to nearly the same number of tiles, regardless of whether those tiles exit early. Consider an example of a group of four Rank2Ks with matrix C consisting of a grid of 2x2 tiles. Consider a grid of 8 threadblocks. The default GEMM scheduler will assign threadblocks to tiles in the following order: Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3 0 1 4 5 0 1 4 5 2 3 6 7 2 3 6 7 Assuming that the problems are lower triangular, blocks 1 and 5 are continuously assigned to inactive tiles. This problem visitor aims to assign threadblocks to only those tiles which are in the upper/lower triangular portion of a given problem. Using the example above, the resulting assignment would be: Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3 0 - 3 - 6 - 1 - 1 2 4 5 7 0 2 3 Achieving the schedule above requires a mapping from threadblock ID to tile coordinates (i, j). We will illustrate this by mapping on a lower-triangular matrix with a 3x3 grid. We first calculate row and column indices assuming one-indexed rows, tiles, and threadblock IDs, and then subtract one to convert to zero-indexed. 
Col 1 Col 2 Col 3 ---------------------- Row 1 | 1 - - Row 2 | 2 3 - Row 3 | 4 5 6 We next outline this mapping, borrowing from: https://stackoverflow.com/a/40954159 Calculating row i given threadblock ID t ---------------------------------------- For a given row i, all threadblock IDs t in that row satisfy the following: t <= 1 + 2 + 3 + ... + (i-1) + i The closed-form equation for the right-hand side is: i(i+1)/2. Using this, we can solve for i given t: t <= i(i+1)/2 2t <= i^2 + i 2t <= i^2 + i + 0.25 - 0.25 2t + 0.25 <= i^2 + i + 0.25 2t + 0.25 <= (i + 0.5)^2 sqrt(2t + 0.25) - 0.5 <= i To account for fractional values, we set: i = ceil(sqrt(2t + 0.25) - 0.5) To turn this into a zero-indexed row and work with zero-indexed t, we perform: i = ceil(sqrt(2(t+1) + 0.25) - 0.5) - 1 = ceil(sqrt(2t + 2.25) - 0.5) - 1 Calculating column j given threadblock ID t and row i ----------------------------------------------------- For a given row i, all threadblock IDs t in that row also satisfy the following: t > 1 + 2 + 3 + ... + (i-2) + (i-1) --> t > i(i-1)/2 Threadblock IDs within a given row are sequential, so the one-indexed column ID for one-indexed threadblock ID t and row i is: j = t - (i(i-1)/2) The zero-indexed version becomes: j = (t+1) - (i(i+1)/2) -1 = t - (i(i+1)/2) Accounting for non-square grids ------------------------------- Though the overall output problem size for Rank2K problems is guranteed to be square, the grids used in computing may not be square due to using non-square threadblock shapes. For example, a threadblock shape of 64x32 operating on a problem of output size 128x128 would result in a grid of 2x4 tiles. This case can be handled by noting that the output resembles a square grid of 2x2 "macro tiles" each of which contains 2 "true tiles." We can thus first map a threadblock ID to its "macro tile" using the equations above, and then map it to the "true tile" within its "macro tile." In the example of a 2x4 grid, this mapping would look as follows: "Macro grid" "True grid" {0, 1} - 0 1 - - {2, 3} {4, 5} 2 3 4 5 A zero-indexed threadblock ID t is mapped to its "macro tile ID" t_macro as: t_macro = t // r Where r is the ratio of the maximum dimension of the grid to the minimum dimension of the grid (i.e., r = 4 / 2 = 2 in the previous example). One uses t_macro and the calculations above to find the row and column in the square matrix to obtain i_macro and j_macro (zero-indexed). The mapping from (i_macro, j_macro) --> (i, j) is simply the following: if (ThreadblockShape::M > ThreadblockShape::N): r = ThreadblockShape::M / ThreadblockShape::N i = i_macro j = (j_macro * r) + (t % r) elif (ThreadblockShape::M < ThreadblockShape::N): r = ThreadblockShape::N / ThreadblockShape::M i = (i_macro * r) + (t % r) j = j_macro else: i = i_macro j = j_macro Handling cases with grid dimensions that aren't multiples of eachother ---------------------------------------------------------------------- Even though threadblock shapes M and N are typically multiples of one another, the grid for a given problem may not have dimensions of the same ratio as that of the threadblock. For example, a problem of size 132x132 using a threadblock of shape 64x32 will result in a grid of 3x5 tiles. In this case, there is not an integer number of "true tiles" per "macro tile." When this scenario arises, we simply pad the larger dimension of the grid such that there are an integer number of "true tiles" per "macro tile." Thus, the 3x5 grid in the example above will be treated as a 3x6 grid. 
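    As a quick check of the zero-indexed formulas derived earlier, revisit the
    original 3x3 lower-triangular example (threadblock IDs t = 0..5):

        t = 0:  i = ceil(sqrt(2.25)  - 0.5) - 1 = 0,  j = 0 - 0 = 0  --> tile (0, 0)
        t = 3:  i = ceil(sqrt(8.25)  - 0.5) - 1 = 2,  j = 3 - 3 = 0  --> tile (2, 0)
        t = 5:  i = ceil(sqrt(12.25) - 0.5) - 1 = 2,  j = 5 - 3 = 2  --> tile (2, 2)

    These are the zero-indexed positions of one-indexed tiles 1, 4, and 6 in the
    table at the top of this section.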
Row and column positions for each tile are calculated as above. Any threadblocks that map to tiles that are outside the problem range or upper/lower triangular portion (e.g., (2, 5)) will exit early from this problem and may proceed to the next problem in the group. Handling upper-triangular matrices ---------------------------------- The only modification needed for upper-triangular matrices is to swap i_macro and j_macro in the calculations above. */ #pragma once #include "cutlass/blas3.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/gemm/kernel/grouped_problem_visitor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { namespace detail { ///////////////////////////////////////////////////////////////////////////////////////////////// // // Helpers for calculating offsets for Rank2K problem visitor. These helpers specifically pertain // to the conversion from "macro tiles" to "true tiles" in the description above. // template < typename ThreadblockShape, typename Enable = void > struct Rank2KGroupedProblemVisitorOffsetHelper; // Partial specialization for the case where threadblock shape M > threadblock shape N template < typename ThreadblockShape > struct Rank2KGroupedProblemVisitorOffsetHelper< ThreadblockShape, typename platform::enable_if< (ThreadblockShape::kM > ThreadblockShape::kN) >::type > { static_assert(ThreadblockShape::kM % ThreadblockShape::kN == 0, "Rank2KGroupedProblemVisitor with threadblock shape M > threadblock shape N " "requires that threadblock shape M be a multiple of threadblock shape N."); static int32_t const kThreadblockSkewRatio = ThreadblockShape::kM / ThreadblockShape::kN; CUTLASS_HOST_DEVICE static int32_t min_dim(cutlass::gemm::GemmCoord grid) { return grid.m(); } CUTLASS_HOST_DEVICE static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { return row; } CUTLASS_HOST_DEVICE static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { return (col * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio); } }; // Partial specialization for the case where threadblock shape M < threadblock shape N template < typename ThreadblockShape > struct Rank2KGroupedProblemVisitorOffsetHelper< ThreadblockShape, typename platform::enable_if< (ThreadblockShape::kM < ThreadblockShape::kN) >::type > { static_assert(ThreadblockShape::kN % ThreadblockShape::kM == 0, "Rank2KGroupedProblemVisitor with threadblock shape M < threadblock shape N " "requires that threadblock shape N be a multiple of threadblock shape M."); static int32_t const kThreadblockSkewRatio = ThreadblockShape::kN / ThreadblockShape::kM; CUTLASS_HOST_DEVICE static int32_t min_dim(cutlass::gemm::GemmCoord grid) { return grid.n(); } CUTLASS_HOST_DEVICE static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { return (row * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio); } CUTLASS_HOST_DEVICE static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { return col; } }; // Partial specialization for the case where threadblock shape M == threadblock shape N // In this case, macro tiles are equivalent to true tiles, so the conversions are // identity functions. 
template < typename ThreadblockShape > struct Rank2KGroupedProblemVisitorOffsetHelper< ThreadblockShape, typename platform::enable_if< (ThreadblockShape::kM == ThreadblockShape::kN) >::type > { static int32_t const kThreadblockSkewRatio = 1; CUTLASS_HOST_DEVICE static int32_t min_dim(cutlass::gemm::GemmCoord grid) { return grid.m(); } CUTLASS_HOST_DEVICE static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { return row; } CUTLASS_HOST_DEVICE static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { return col; } }; // Helper for correctly representing problem sizes in grouped kernels template <typename ThreadblockShape> struct Rank2KGroupedProblemSizeHelper { using OffsetHelper = Rank2KGroupedProblemVisitorOffsetHelper<ThreadblockShape>; CUTLASS_HOST_DEVICE static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) { return cutlass::gemm::GemmCoord( ((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM), ((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN), 1); } CUTLASS_HOST_DEVICE static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) { // Return the number of tiles at or below the diagonal (or at and above // for mode kUpper). We do this by first calculating this value assuming // we have a square matrix of tiles of size `dim x dim` where `dim` is the // minimum among {grid.m(), grid.n()}. We then multiply the resulting value // by OffsetHelper::kThreadblockSkewRatio to account for cases in which there // are more tiles in one dimension than the other. int32_t dim = OffsetHelper::min_dim(grid); int32_t tiles_on_diagonal = dim; int32_t tiles_below_diagonal = ((dim * (dim - 1)) / 2); return (tiles_on_diagonal + tiles_below_diagonal) * OffsetHelper::kThreadblockSkewRatio; } CUTLASS_HOST_DEVICE static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {} }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// // // Default problem visitor for fill modes kUpper and kLower. 
// template <typename ThreadblockShape, GroupScheduleMode GroupScheduleMode_, int PrefetchTileCount, int ThreadCount, cutlass::FillMode FillModeC> struct Rank2KGroupedProblemVisitor : public GroupedProblemVisitor< detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>, ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount> { static cutlass::FillMode const kFillModeC = FillModeC; static_assert(kFillModeC == cutlass::FillMode::kLower || kFillModeC == cutlass::FillMode::kUpper, "Default Rank2KGroupedProblemVisitor requires fill mode of kLower or kUpper."); using ProblemSizeHelper = detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>; using Base = GroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount>; using OffsetHelper = typename ProblemSizeHelper::OffsetHelper; using Params = typename Base::Params; using SharedStorage = typename Base::SharedStorage; // // Methods // CUTLASS_DEVICE Rank2KGroupedProblemVisitor( Params const &params_, SharedStorage &shared_storage_, int32_t block_idx ): Base(params_, shared_storage_, block_idx) {} CUTLASS_DEVICE cutlass::gemm::GemmCoord threadblock_offset(int32_t threadblock_id) const { int32_t macro_id = threadblock_id / OffsetHelper::kThreadblockSkewRatio; int32_t macro_row = ceil(cutlass::fast_sqrt((2*macro_id) + 2.25) - 0.5) - 1; int32_t macro_col = macro_id - (((macro_row+1) * macro_row)/2); if (kFillModeC == cutlass::FillMode::kUpper) { swap(macro_row, macro_col); } int32_t row = OffsetHelper::macro_row_to_row(macro_row, threadblock_id); int32_t col = OffsetHelper::macro_col_to_col(macro_col, threadblock_id); return cutlass::gemm::GemmCoord(row, col, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
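//
// A standalone sketch (separate from the header above) of the threadblock-ID to
// tile-coordinate mapping that Rank2KGroupedProblemVisitor::threadblock_offset()
// performs for a lower-triangular fill when ThreadblockShape::kM > kN. The skew
// ratio, grid size, and the use of std::sqrt/std::ceil here are illustrative
// stand-ins for the kernel's kThreadblockSkewRatio and cutlass::fast_sqrt; the
// upper-triangular case would additionally swap macro_row and macro_col.
//
#include <cmath>
#include <cstdio>

struct TileCoord { int row; int col; };

TileCoord lower_triangular_offset(int threadblock_id, int skew_ratio) {
  int macro_id  = threadblock_id / skew_ratio;
  // Zero-indexed row/column within the square "macro" grid (see the derivation
  // in the comment block of the header above).
  int macro_row = int(std::ceil(std::sqrt(2.0 * macro_id + 2.25) - 0.5)) - 1;
  int macro_col = macro_id - (macro_row * (macro_row + 1)) / 2;
  // For kM > kN, each macro column expands into skew_ratio "true" columns.
  return { macro_row, macro_col * skew_ratio + (threadblock_id % skew_ratio) };
}

int main() {
  // The 2x4 grid example from the comment block above: 64x32 threadblocks on a
  // 128x128 problem, so skew_ratio = 64 / 32 = 2 and six active tiles.
  for (int t = 0; t < 6; ++t) {
    TileCoord c = lower_triangular_offset(t, /*skew_ratio=*/2);
    std::printf("threadblock %d -> tile (%d, %d)\n", t, c.row, c.col);
  }
  // Prints (0,0) (0,1) (1,0) (1,1) (1,2) (1,3), matching the "true grid" figure.
  return 0;
}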
cutlass/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h", "repo_id": "cutlass", "token_count": 5798 }
40
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/params_sparse_base.h" #include "cutlass/matrix_coord.h" #include "cutlass/semaphore.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled. 
> struct SparseGemm { using Mma = Mma_; using Epilogue = Epilogue_; using OutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static bool const kSplitKSerial = SplitKSerial; static int const kSparse = Mma::kSparse; static int const kMetaSizeInBits = Mma::kMetaSizeInBits; static int const kMaxID2 = Mma::kMaxID2; static int const kElementsPerElementE = Mma::kElementsPerElementE; using ElementE = typename Mma::ElementE; using LayoutE = typename Mma::LayoutE; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using ParamsA = typename Mma::IteratorA::Params; using TensorRefA = typename Mma::IteratorA::TensorRef; using ParamsB = typename Mma::IteratorB::Params; using TensorRefB = typename Mma::IteratorB::TensorRef; using ParamsE = typename Mma::IteratorE::Params; using TensorRefE = typename Mma::IteratorE::TensorRef; /// Parameters structure struct Params : public SparseParamsBase< ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB, ParamsE, TensorRefE> { using Base = SparseParamsBase< ThreadblockSwizzle, ParamsA, TensorRefA, ParamsB, TensorRefB, ParamsE, TensorRefE>; // // Data members // typename Epilogue::OutputTileIterator::Params params_C; typename Epilogue::OutputTileIterator::TensorRef ref_C; typename Epilogue::OutputTileIterator::Params params_D; typename Epilogue::OutputTileIterator::TensorRef ref_D; typename OutputOp::Params output_op; int *semaphore; // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmCoord const & grid_tiled_shape, TensorRefA ref_A, TensorRefB ref_B, typename Epilogue::OutputTileIterator::TensorRef ref_C, typename Epilogue::OutputTileIterator::TensorRef ref_D, TensorRefE ref_E, typename OutputOp::Params output_op = typename OutputOp::Params(), int *workspace = nullptr ): Base(problem_size, grid_tiled_shape, ref_A, ref_B, ref_E, Mma::Shape::kK), params_C(ref_C.layout()), ref_C(ref_C), params_D(ref_D.layout()), ref_D(ref_D), output_op(output_op) { semaphore = workspace; } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE SparseGemm() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size, typename Mma::IteratorA::TensorRef ref_A, typename Mma::IteratorB::TensorRef ref_B, typename Epilogue::OutputTileIterator::TensorRef ref_C, typename Epilogue::OutputTileIterator::TensorRef ref_D, typename Mma::IteratorE::TensorRef ref_E) { static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; static int const kAlignmentE = Mma::IteratorE::AccessType::kElements; if (!TensorRef_aligned(ref_A, kAlignmentA)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_B, kAlignmentB)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_C, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_D, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_E, kAlignmentE)) { return Status::kErrorMisalignedOperand; } if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) || (problem_size.n() % 
kAlignmentB) || (problem_size.k() % kAlignmentB) || (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) || (problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) { return Status::kErrorMisalignedOperand; } // The K dimension has to be a multiple of the threadblock K because out-of-bound // metadata would be initialized to 0 by cp.async zfill, but 0 is not // valid metadata. if (problem_size.k() % Mma::Shape::kK) { return Status::kErrorMisalignedOperand; } // The M dimension has to be a multiple of 32 (sparse float) or 16 (sparse int) // because of the row reordering of operand E. static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16; if (problem_size.m() % kAlignmentM) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.k() * params.gemm_k_size / kSparse, }; cutlass::MatrixCoord tb_offset_B{ threadblock_tile_offset.k() * params.gemm_k_size, threadblock_tile_offset.n() * Mma::Shape::kN }; cutlass::MatrixCoord tb_offset_E{ threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.k() * params.gemm_k_size / kSparse, }; // Problem size is a function of threadblock index in the K dimension int problem_size_k = min( params.problem_size.k(), (threadblock_tile_offset.k() + 1) * params.gemm_k_size); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A, B, and E operands typename Mma::IteratorA iterator_A( params.params_A, params.ref_A.data(), {params.problem_size.m(), problem_size_k / kSparse}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, params.ref_B.data(), {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); typename Mma::IteratorE iterator_E( params.params_E, params.ref_E.data(), {params.problem_size.m(), problem_size_k / kSparse / kElementsPerElementE}, thread_idx, tb_offset_E); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); if (!kSplitKSerial || gemm_k_iterations > 0) { // Compute threadblock-scoped matrix multiply-add mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators); } // // Epilogue // OutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); // If performing a reduction via split-K, fetch the initial synchronization if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, params.ref_C.data(), params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, params.ref_D.data(), params.problem_size.mn(), thread_idx, threadblock_offset ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. epilogue(output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } __threadfence(); semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
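The split-K bookkeeping at the end of operator() serializes the K partitions that land on the same output tile: partition k spins on the semaphore until it reads k, accumulates on top of whatever the previous partition left in D (the source iterator is redirected from C to D for k > 0), and then publishes k + 1, with the final partition resetting the semaphore to 0 for subsequent grids. A minimal host-side sketch of that ordering, with single floats standing in for whole tiles and the alpha/beta handling of the real OutputOp omitted, follows.

// Serial split-K ordering, modeled on Semaphore::wait()/release() in SparseGemm::operator().
#include <cstdio>

int main() {
  int const k_partitions = 4;   // hypothetical grid_tiled_shape.k()
  int semaphore = 0;            // the workspace is zero-initialized before launch
  float c = 3.0f;               // stand-in for one element of source tensor C
  float d = 0.0f;               // stand-in for the matching element of destination tensor D

  for (int k = 0; k < k_partitions; ++k) {
    // semaphore.wait(k): partition k proceeds only after partition k - 1 has released.
    if (semaphore != k) { std::printf("partition %d would spin\n", k); return 1; }

    float partial = 1.0f;               // this partition's accumulator contribution
    float source = (k == 0) ? c : d;    // later partitions read the partially updated D
    d = partial + source;               // the real epilogue applies alpha/beta via the OutputOp

    // semaphore.release(lock): the final partition resets the lock to 0.
    semaphore = (k + 1 == k_partitions) ? 0 : k + 1;
  }
  std::printf("d = %.1f (expected %.1f), semaphore = %d\n", d, c + k_partitions * 1.0f, semaphore);
  return 0;
}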
cutlass/include/cutlass/gemm/kernel/sparse_gemm.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/sparse_gemm.h", "repo_id": "cutlass", "token_count": 4865 }
41
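For reference, the operand extents that SparseGemm's iterators are constructed with follow directly from the structured-sparse storage: only the nonzero half of A is kept, so A and the metadata tensor E span k/kSparse columns, and several metadata entries are packed into each ElementE word. The sketch below evaluates those extents and the can_implement() divisibility rules; kSparse, kElementsPerElementE, Mma::Shape::kK, and the ElementE width are illustrative assumptions here, not values read from a specific instantiation.

// Extent and alignment bookkeeping for a hypothetical sparse GEMM configuration.
#include <cassert>
#include <cstdio>

int main() {
  int const kSparse = 2;               // illustrative: only the nonzero half of A is stored
  int const kElementsPerElementE = 8;  // illustrative: metadata entries packed per ElementE word
  int const kThreadblockK = 64;        // illustrative Mma::Shape::kK
  int const kAlignmentM = 32;          // corresponds to sizeof(ElementE) == 2 in this illustration

  int m = 128, n = 256, k = 256;       // hypothetical GEMM problem size

  // Extents used when constructing iterator_A, iterator_B, and iterator_E.
  std::printf("A extent: %d x %d\n", m, k / kSparse);
  std::printf("B extent: %d x %d\n", k, n);
  std::printf("E extent: %d x %d\n", m, k / kSparse / kElementsPerElementE);

  // Mirrors can_implement(): K must be a multiple of the threadblock K so no metadata
  // tile is zero-filled, and M must satisfy the metadata reordering alignment.
  assert(k % kThreadblockK == 0);
  assert(m % kAlignmentM == 0);
  return 0;
}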
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting simt instructions. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/fast_math.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h" #include "cutlass/gemm/warp/mma_simt_policy.h" #include "cutlass/gemm/warp/mma_simt.h" #include "cutlass/gemm/threadblock/default_mma_core.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { namespace detail { // convert a WarpShape which is the whole tile of elements into warp num threads. // The goal is for each thread's tile of elements to be as square as possible // for performance (4x4 will be faster than 2x8). template<typename WarpShape> constexpr int simt_get_warp_threads_m() { return (WarpShape::kM > WarpShape::kN) ? 8 : 4; } /// Computes padding in shared memory to perform efficient transpose without bank conflicts. constexpr int simt_transpose_padding(int threads, int crosswise, int size_in_bits) { return (size_in_bits >= 32 ? 
threads / crosswise / (size_in_bits / 32) : threads / crosswise * (32 / size_in_bits) ); } } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: column-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, SmemThreadMapA // was IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, SmemThreadMapB // was IteratorThreadMapA >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value); static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value); static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) >; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, SmemThreadMapA >; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value); static_assert(!(kPaddingM % LaneM), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) >; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts MatrixShape<0, 0>, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: column-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, layout::ColumnMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, SmemThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value); static_assert(!(kPaddingN % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) >; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementC, LayoutC, OperatorClass, 2, Operator>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define 
MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: column-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::AffineRank2RowMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, OperatorClass, 2, Operator>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::AffineRank2RowMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, 
layout::RowMajor, ElementB, layout::RowMajor, ElementC, LayoutC, OperatorClass, 2, Operator>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: column-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 1>, ElementA_, layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, OperatorClass, 2, Operator>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: simt class, for dp4a /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, 
GemmShape<1, 1, 4>, int8_t, layout::ColumnMajor, int8_t, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 4>; using ElementA = int8_t; using LayoutA = layout::ColumnMajor; using ElementB = int8_t; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorInterleaved<4>; using SmemLayoutB = layout::RowMajorInterleaved<4>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<4, 4> >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<4, 4> >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(4, ThreadTileM); static const int LaneN = cutlass::const_min(4, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 4>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) PartitionsK /// Number of partitions along K dimension >; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: // /// /// A: Row-major /// B: Column-major /// Operator: simt class, for dp4a /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t, layout::RowMajor, int8_t, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 4>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorInterleaved<4>; using SmemLayoutB = layout::RowMajorInterleaved<4>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<4, 4> >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, SmemThreadMapA >; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<4, 4> >; /// Transpose the ThreadMap of iterator A using SmemThreadMapB = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, SmemThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(4, ThreadTileM); static const int LaneN = cutlass::const_min(4, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 4>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) PartitionsK /// Number of partitions along K dimension >; static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value); static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value); /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<kPaddingM, 0>, MatrixShape<0, kPaddingN>, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: // /// /// A: Row-major /// B: Row-major /// Operator: simt class, for dp4a /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t, layout::RowMajor, int8_t, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 4>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using ElementB = int8_t; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorInterleaved<4>; using SmemLayoutB = layout::RowMajorInterleaved<4>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<4, 4> >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, SmemThreadMapA >; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<4, 4> >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(4, ThreadTileM); static const int LaneN = cutlass::const_min(4, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 4>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) PartitionsK /// Number of partitions along K dimension >; static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value); static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value); /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<kPaddingM, 0>, MatrixShape<0, 0>, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: // /// /// A: Column-major /// B: Column-major /// Operator: simt class, for dp4a /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_> struct DefaultMmaCore<Shape_, WarpShape_, GemmShape<1, 1, 4>, int8_t, layout::ColumnMajor, int8_t, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_ > { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<1, 1, 4>; using ElementA = int8_t; using LayoutA = layout::ColumnMajor; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const PartitionsK = Shape::kK / WarpShape::kK; /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, PartitionsK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorInterleaved<4>; using SmemLayoutB = layout::RowMajorInterleaved<4>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinear2DThreadTileStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<4, 4> >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator2dThreadTile< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinear2DThreadTileStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<4, 4> >; /// Transpose the ThreadMap of iterator A using SmemThreadMapB = transform::TransposePitchLinearThreadMap2DThreadTile<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator2dThreadTile< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, SmemThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(4, ThreadTileM); static const int LaneN = cutlass::const_min(4, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 4>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::ColumnMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) PartitionsK /// Number of partitions along K dimension >; static int const kPaddingM = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value); static int const kPaddingN = detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value); /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, kPaddingN>, WarpCount::kK >; }; } // namespace threadblock } // namespace gemm } // namespace cutlass
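The two dp4a specializations in this file are reached through the generic DefaultMmaCore entry point. A minimal sketch of selecting the column-major/column-major int8 specialization follows; it is not taken from the file, and the tile shapes and the OpMultiplyAdd operator are illustrative assumptions rather than required values.

#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"

// Hypothetical tile sizes; any shapes satisfying the static_asserts above would do.
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape        = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;   // dp4a

using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
    ThreadblockShape, WarpShape, InstructionShape,
    int8_t,  cutlass::layout::ColumnMajor,    // ElementA, LayoutA
    int8_t,  cutlass::layout::ColumnMajor,    // ElementB, LayoutB
    int32_t, cutlass::layout::RowMajor,       // ElementC, LayoutC
    cutlass::arch::OpClassSimt,
    2,                                        // Stages
    cutlass::arch::OpMultiplyAdd>;            // Operator (assumed)

// 4 x 2 warps of 32 threads cover the 128x128 threadblock tile.
static_assert(MmaCore::kThreads == 256, "expected 8 warps per threadblock");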
cutlass/include/cutlass/gemm/threadblock/default_mma_core_simt.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/default_mma_core_simt.h", "repo_id": "cutlass", "token_count": 20761 }
42
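Before moving on to the next file: the warp-level lane partitioning computed inside these specializations (WarpNumThreadsM/N, ThreadTileM/N, LaneM/N) can be traced with the small host-side sketch below. It is illustrative only; in particular, the 8-if-kM-greater-than-kN-else-4 rule stands in for detail::simt_get_warp_threads_m and is an assumption about that helper, and the 32x64 warp tile is hypothetical.

#include <algorithm>
#include <cstdio>

int main() {
  constexpr int kWarpSize = 32;

  // Hypothetical warp tile.
  constexpr int WarpShapeM = 32, WarpShapeN = 64;

  // Assumed stand-in for detail::simt_get_warp_threads_m<WarpShape>().
  constexpr int WarpNumThreadsM = (WarpShapeM > WarpShapeN) ? 8 : 4;
  constexpr int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;

  // Per-thread tile of the warp tile.
  constexpr int ThreadTileM = WarpShapeM / WarpNumThreadsM;  // 8
  constexpr int ThreadTileN = WarpShapeN / WarpNumThreadsN;  // 8

  // LaneMmaShape caps the per-lane MMA at 4x4, with K fixed at 4 for dp4a.
  constexpr int LaneM = std::min(4, ThreadTileM);
  constexpr int LaneN = std::min(4, ThreadTileN);

  std::printf("threads per warp %dx%d, thread tile %dx%d, lane mma %dx%dx4\n",
              WarpNumThreadsM, WarpNumThreadsN, ThreadTileM, ThreadTileN, LaneM, LaneN);
  return 0;
}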
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. It loads two loop invariant vectors, norm and sum, in the prologue and stores them in the register file. We will call elementwise operation to apply norm and sum between ldmatrix and warp mma. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "cutlass/gemm/warp/softmax_scale_bias_transform.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class MmaMainloopFusionBase { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Policy describing tuning details using Policy = Policy_; // // Dependent types // /// Warp-level Mma using Operator = typename Policy::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. 
using WarpGemm = typename Policy::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>; /// Number of warp-level GEMM oeprations static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); /// Number of stages static int const kStages = Stages; /// Tensor reference to the A operand using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>; /// Tensor reference to the B operand using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>; // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM class SharedStorage { public: // // Type definitions // /// Shape of the A matrix operand in shared memory using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow, Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; public: // // Data members // /// Buffer for A operand AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A; /// Buffer for B operand AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B; public: // // Methods // /// Returns a layout object for the A matrix CUTLASS_DEVICE static typename Operator::LayoutA LayoutA() { return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); } /// Returns a layout object for the B matrix CUTLASS_HOST_DEVICE static typename Operator::LayoutB LayoutB() { return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); } /// Returns a TensorRef to the A operand CUTLASS_HOST_DEVICE TensorRefA operand_A_ref() { return TensorRefA{operand_A.data(), LayoutA()}; } /// Returns a TensorRef to the B operand CUTLASS_HOST_DEVICE TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; } }; protected: // // Data members // /// Iterator to load a warp-scoped tile of A operand from shared memory typename Operator::IteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of B operand from shared memory typename Operator::IteratorB warp_tile_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE MmaMainloopFusionBase( ///< Shared storage needed for internal use by threadblock-scoped GEMM SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {} }; /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Iterates over vectors of var and mean vector in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorNormSum_, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Whether problem has been transformed. This determines to which operand /// the softmax is applied. bool InternalTranspose, /// Use zfill or predicate for out-of-bound cp.async SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, /// Used for partial specialization typename Enable = bool> class MmaSoftmaxMainloopFusionMultistage : public MmaMainloopFusionBase<Shape_, Policy_, Stages> { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Iterates over tiles of the var and mean vectors in global memory using IteratorNormSum = IteratorNormSum_; ///< Policy describing tuning details using Policy = Policy_; ///< Base class using Base = MmaMainloopFusionBase<Shape_, Policy, Stages>; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // // Dependent types // /// Fragment of accumulator tile using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on A operand static ComplexTransform const kTransformA = Operator::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = Operator::kTransformB; /// Internal structure exposed for introspection. 
struct Detail { static_assert(Base::kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; using WarpLoadedFragmentNormSum = typename IteratorNormSum::Fragment; static bool const kInternalTranspose = InternalTranspose; using SoftmaxFragment = typename platform::conditional<kInternalTranspose, WarpTransformedFragmentB, WarpTransformedFragmentA>::type; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; int warp_idx_m_; int warp_idx_n_; public: /// Construct from tensor references CUTLASS_DEVICE MmaSoftmaxMainloopFusionMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM; warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_}); } CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr = 
reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_A.get(); if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); } ++iterator_A; } ++this->smem_iterator_A_; } } iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B.get(); if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B.valid()); } ++iterator_B; } ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< iterator over B operand in global memory IteratorNormSum iterator_norm_sum, ///< initial value of accumulator FragmentC const &src_accum) { // // Prologue // // Issue several complete stages WarpLoadedFragmentNormSum warp_loaded_frag_norm_sum; iterator_norm_sum.add_tile_offset({0, warp_idx_m_}); iterator_norm_sum.load(warp_loaded_frag_norm_sum); CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; int src_bytes = (iterator_A.valid() ? 
kSrcBytes : 0); cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); ++iterator_A; } ++this->smem_iterator_A_; } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B.get(), iterator_B.valid()); ++iterator_B; } ++this->smem_iterator_B_; } // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; cutlass::gemm::warp::SoftmaxScaleBiasTransform< SoftmaxFragment, WarpLoadedFragmentNormSum> elementwise_transform; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance(iterator_A, iterator_B); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); if (kInternalTranspose) { elementwise_transform(warp_transformed_frag_B[0], warp_loaded_frag_norm_sum); } else { elementwise_transform(warp_transformed_frag_A[0], warp_loaded_frag_norm_sum); } // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) { warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % 2]); if (kInternalTranspose) { elementwise_transform(warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_norm_sum); } else { elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2], warp_loaded_frag_norm_sum); } } // Issue global->shared copies for the next stage int group_start_iteration_A, group_start_iteration_B; if (warp_mma_k + 1 == Base::kWarpGemmIterations) { group_start_iteration_A = 0; group_start_iteration_B = 0; } else { group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; } copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B); warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], accum ); if (warp_mma_k + 2 == Base::kWarpGemmIterations) { // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations) { warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); if (kInternalTranspose) { elementwise_transform(warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_norm_sum); } else { elementwise_transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_norm_sum); } } } } if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { // commit and drain all pending and predicated cp.async pnz from the GEMM mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } // Commit and drain all pending and predicated cp.async pnz from the GEMM 
mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
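The mainloop above walks two indices around a circular shared-memory buffer of kStages stages. The host-side sketch below (illustrative only, not part of the kernel) isolates that wrap-around bookkeeping, assuming three stages as the pipeline depth.

#include <cstdio>

int main() {
  constexpr int kStages = 3;

  // After the prologue, kStages - 1 stages are already filled, so writes
  // resume at the last slot while reads begin at slot 0, as in the kernel.
  int smem_write_stage_idx = kStages - 1;
  int smem_read_stage_idx  = 0;

  for (int iter = 0; iter < 6; ++iter) {
    // Advance the write index, wrapping back to the start of the circular buffer.
    smem_write_stage_idx =
        (smem_write_stage_idx == kStages - 1) ? 0 : smem_write_stage_idx + 1;
    // Advance the read index with the same wrap-around rule.
    smem_read_stage_idx =
        (smem_read_stage_idx == kStages - 1) ? 0 : smem_read_stage_idx + 1;

    std::printf("k-stage %d: write slot %d, read slot %d\n",
                iter, smem_write_stage_idx, smem_read_stage_idx);
  }
  return 0;
}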
cutlass/include/cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h", "repo_id": "cutlass", "token_count": 11315 }
43
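The multistage mainloop in the preceding file is built on the cp.async commit/drain primitives it calls throughout. The device-side sketch below (a simplified illustration for an SM80+ target, not code from the file; the include path is assumed to pull in the cp.async helpers) shows that pattern in isolation: each cp_async_fence() commits the copies issued so far as one group, and cp_async_wait<N>() blocks until at most N committed groups remain in flight.

#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"   // assumed to provide cp_async, cp_async_fence, cp_async_wait

__global__ void two_stage_prefetch(float const *gmem, float *out) {
  __shared__ float smem[2][128];
  int t = threadIdx.x;  // assumes the kernel is launched with blockDim.x == 128

  // Stage 0: one 4-byte asynchronous copy per thread, committed as a group.
  cutlass::arch::cp_async<4, cutlass::arch::CacheOperation::Always>(
      &smem[0][t], gmem + t, true);
  cutlass::arch::cp_async_fence();

  // Stage 1: a second group of copies.
  cutlass::arch::cp_async<4, cutlass::arch::CacheOperation::Always>(
      &smem[1][t], gmem + 128 + t, true);
  cutlass::arch::cp_async_fence();

  // Wait until at most one committed group is still outstanding: stage 0 has landed.
  cutlass::arch::cp_async_wait<1>();
  __syncthreads();

  out[t] = smem[0][t];
}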
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for loading 128b vectors of 128b elements. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::TensorOpMultiplicandCongruous128b, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); static_assert(!(Shape::kContiguous % 8) && !(Shape::kStrided % 4), "Divisibility."); static_assert(sizeof_bits<Element_>::value == 128, "This is specialized for 128b accesses."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicandCongruous128b; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Load two elements per access static int const kElementsPerAccess = 1; /// Policy defining internal details of tile iterator struct Policy { /// Shape of one access using Delta = layout::PitchLinearShape<8, 4>; /// Number of iterations to load using Iterations = layout::PitchLinearShape< Shape::kContiguous / Delta::kContiguous, InstructionShape::kStrided / Delta::kStrided >; }; private: /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Pointer type used for accesses using AccessType = AlignedArray<Element, kElementsPerAccess, 16>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) { int quad_pair = lane_id / 8; int quad = lane_id / 4; int lane = lane_id % 4; int row = (quad & 1) * 4 + (lane ^ quad_pair); byte_offset_ = (row + quad_pair * stride_) * sizeof(AccessType); pointer_= reinterpret_cast<AccessType const *>(ref.data()); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { pointer_ += offset; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { int offset = (tile_offset.contiguous() * Shape::kContiguous) + (tile_offset.strided() * InstructionShape::kStrided * stride_); add_pointer_offset(offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { pointer_ += stride_ * InstructionShape::kStrided; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { int access_idx = c + s * Policy::Iterations::kContiguous; AccessType const *source_ptr = pointer_ + Policy::Delta::kContiguous * c + Policy::Delta::kStrided * s * stride_; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr); fetch_ptr[access_idx] = *source; } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * InstructionShape::kStrided * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { } }; //////////////////////////////////////////////////////////////////////////////// /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// 
Underlying tile iterator implementation using Base = MmaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element, layout::TensorOpMultiplicandCongruous128b, layout::PitchLinearShape<InstructionShape::kColumn, InstructionShape::kRow>, kOpDelta, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): iterator_({ref.data(), ref.stride()}, lane_id) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////// /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element, layout::TensorOpMultiplicandCongruous128b, layout::PitchLinearShape<InstructionShape::kRow, InstructionShape::kColumn>, kOpDelta, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// 
Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): iterator_({ref.data(), ref.stride()}, lane_id) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// /// Partial specialization for complex<T> /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of underlying field of reals. typename RealElement, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_> class MmaTensorOpAccumulatorTileIterator< Shape_, complex<RealElement>, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = complex<RealElement>; /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kRow % InstructionShape::kM) && !(Shape::kColumn % InstructionShape::kN), "Shape of warp-level Mma must be divisible by operator shape."); static_assert(platform::is_same<TensorCoord, MatrixCoord>::value, "Layouts must be defined for logical MatrixCoord coordinate space."); /// Number of mma operations performed using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM, Shape::kColumn / InstructionShape::kN>; }; private: // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements // of that row. The accumulators within one row are assumed to be consecutive. 
static int const kElementsPerAccess = InstructionShape::kN / 4; static int const kRowsPerTile = 8; static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile. It is assumed that the accumulators /// are stored in a planar complex arrangement with the real parts as entirely contiguous /// followed by the imaginary parts. using Fragment = Array<RealElement, Shape::kCount / kThreads * 2>; static int const kRealIndex = 0; static int const kImaginaryIndex = Shape::kCount / kThreads; private: /// Reference to output tensor TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator++() { // deliberate no-op return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpAccumulatorTileIterator & operator--() { // deliberate no-op return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to load from the tensor Index pointer_offset) const { ///< loads a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; Element z = offset_ref.at({accum_m, accum_n}); frag[mma_accum_start + row * kElementsPerAccess + col + kRealIndex] = z.real(); frag[mma_accum_start + row * kElementsPerAccess + col + kImaginaryIndex] = z.imag(); } } } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( Fragment &frag, ///< fragment to load from the tensor Index byte_offset) const { ///< loads a tile with a linear offset load_with_pointer_offset(byte_offset / sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles load(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load( Fragment &frag, ///< fragment to load from the tensor TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } /// Stores a fragment to memory CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, ///< fragment to store from the tensor Index pointer_offset) const { ///< store a tile with a linear offset TensorRef offset_ref(ref_); offset_ref.add_pointer_offset(pointer_offset); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile; int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; int idx = mma_accum_start + row * kElementsPerAccess + col; Element z(frag[kRealIndex + idx], frag[kImaginaryIndex + idx]); offset_ref.at({accum_m, accum_n}) = z; } } } } } /// Stores a fragment to memory with additional pointer offset CUTLASS_DEVICE void store_with_byte_offset( Fragment const &frag, ///< fragment to store from the tensor Index byte_offset) const { ///< store a tile with a linear offset store_with_pointer_offset(byte_offset / sizeof(Element)); } /// Stores a fragment to memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( Fragment &frag, ///< fragment to store to the tensor TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles store(frag, tile_offset, 0); } /// Stores a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void store( /// fragment to store to the tensor Fragment const &frag, /// stores a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// stores a tile with a logical offset AND a pointer offset Index pointer_offset) const { store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for loading 128b vectors of 128b elements. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::TensorOpMultiplicandCrosswise128x4, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 8), "Divisibility."); static_assert(sizeof_bits<Element_>::value == 128, "This is specialized for 128b accesses."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicandCrosswise128x4; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Load two elements per access static int const kElementsPerAccess = 1; /// Policy defining internal details of tile iterator struct Policy { /// Shape of one access using Delta = layout::PitchLinearShape<4, 8>; /// Number of iterations to load using Iterations = layout::PitchLinearShape< InstructionShape::kContiguous / Delta::kContiguous, Shape::kStrided / Delta::kStrided >; }; private: /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Pointer type used for accesses using AccessType = AlignedArray<Element, kElementsPerAccess, 16>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) { int quad = lane_id / 4; int liq = lane_id % 4; int c = liq + (quad & 1) * 4; int s = (quad / 2); byte_offset_ = (c + s * stride_) * sizeof(AccessType); pointer_= reinterpret_cast<AccessType const *>(ref.data()); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { pointer_ += offset; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { // Compute the offset in units of elements. Note, the external coordinate system is // approximately transposed with respect to the tiled internal structure int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) * stride_ + (tile_offset.strided() * Shape::kStrided); add_pointer_offset(offset); byte_offset_ ^= (tile_offset.contiguous() & 1) * 4 * sizeof(AccessType); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { pointer_ += stride_ * InstructionShape::kContiguous; byte_offset_ ^= 4 * sizeof(AccessType); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::Iterations::kStrided; ++s) { int access_idx = s + c * Policy::Iterations::kStrided; AccessType const *source_ptr = pointer_ + Policy::Delta::kContiguous * c * stride_ + Policy::Delta::kStrided * s; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr); fetch_ptr[access_idx] = *source; } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * InstructionShape::kContiguous * stride_ + tile_offset.strided() * Shape::kStrided; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
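///
/// Illustrative sketch (editorial addition, not part of the original interface): a caller with a
/// fully unrolled k-group loop might drive this hook roughly as follows, where `kGroupsPerTile`,
/// `iter`, and `frag` are hypothetical names:
///
///   CUTLASS_PRAGMA_UNROLL
///   for (int k = 0; k < kGroupsPerTile; ++k) {
///     iter.set_kgroup_index(k);  // k is a compile-time constant once the loop is unrolled
///     iter.load(frag);
///     ++iter;
///   }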
CUTLASS_DEVICE void set_kgroup_index(int k_group) { } }; //////////////////////////////////////////////////////////////////////////////// /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element, layout::TensorOpMultiplicandCrosswise128x4, layout::PitchLinearShape<InstructionShape::kColumn, InstructionShape::kRow>, kOpDelta, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): iterator_({ref.data(), ref.stride()}, lane_id) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & 
operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////// /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element, layout::TensorOpMultiplicandCrosswise128x4, layout::PitchLinearShape<InstructionShape::kRow, InstructionShape::kColumn>, kOpDelta, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): iterator_({ref.data(), ref.stride()}, lane_id) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension 
CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // Congruous shared memory layout // Warp-level iterators for complex<float>*complex<float> + complex<float> => complex<float> // The underlying iterators are similar to that for MMA f64*f64 + f64 = f64 ///////////////////////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for loading 128b vectors of 64b elements. 
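///
/// Editorial note: a cutlass::complex<float> element occupies 64 bits (two 32-bit reals), so the
/// 128-bit accesses used below (kElementsPerAccess == 2, AccessType = AlignedArray<Element, 2, 16>)
/// move two complex values per lane.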
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, cutlass::complex<float>, cutlass::layout::TensorOpMultiplicandCongruous64b, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 8), "Divisibility."); /// Element type using Element = cutlass::complex<float>; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Load two elements per access static int const kElementsPerAccess = 2; /// Policy defining internal details of tile iterator struct Policy { /// Shape of one access using Delta = layout::PitchLinearShape<8, 4>; /// Number of iterations to load using Iterations = layout::PitchLinearShape< Shape::kContiguous / kElementsPerAccess / Delta::kContiguous, InstructionShape::kStrided / Delta::kStrided >; }; private: /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Pointer type used for accesses using AccessType = AlignedArray<Element, kElementsPerAccess, 16>; /// Internal counter used to jump to next K partition int k_group_idx_; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0), k_group_idx_(0) { int access_strided = lane_id / Policy::Delta::kContiguous; int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided; pointer_= reinterpret_cast<AccessType const *>(ref.data()) + access_contiguous + access_strided * stride_; } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof(Element); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { int offset = (tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess + tile_offset.contiguous() * Shape::kContiguous; add_pointer_offset(offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { add_tile_offset({0, 1}); return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator & operator--() { add_tile_offset({0, -1}); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { int access_idx = c + s * Policy::Iterations::kContiguous; AccessType const *source_ptr = pointer_ + Policy::Delta::kContiguous * c + Policy::Delta::kStrided * s * stride_; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr); fetch_ptr[access_idx] = *source; } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * InstructionShape::kStrided * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { } }; //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // Crosswise shared memory layout // Warp-level iterators for complex<float>*complex<float> + complex<float> => complex<float> // The underlying iterators are similar to that for f64*f64 + f64 = f64 ///////////////////////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for loading 128b vectors of 64b elements. 
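///
/// Editorial note: in this crosswise arrangement the iterator below advances along K by stepping
/// its pointer a whole instruction-k stride and toggling a 64-byte XOR term
/// (byte_offset_ ^= 0x40) that tracks the shared-memory swizzle as successive k-groups are
/// visited; add_tile_offset_negative() applies the same toggle when the current k-group index is
/// odd.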
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// Number of partitions along K dimension int PartitionsK_> class MmaTensorOpMultiplicandTileIterator< Shape_, Operand_, complex<float>, cutlass::layout::TensorOpMultiplicand64bCrosswise, InstructionShape_, OpDelta_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility."); static_assert(sizeof_bits<complex<float>>::value == 64, "This is specialized for 64b accesses."); /// Element type using Element = complex<float>; /// Layout of source tile using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Load two elements per access static int const kElementsPerAccess = 2; /// Policy defining internal details of tile iterator struct Policy { /// Shape of one access using Delta = layout::PitchLinearShape<4, 16>; /// Number of iterations to load using Iterations = layout::PitchLinearShape< InstructionShape::kContiguous / Delta::kContiguous, Shape::kStrided / Delta::kStrided >; }; private: /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Pointer type used for accesses using AccessType = AlignedArray<Element, kElementsPerAccess, 16>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; /// Internal counter for tracking K-group Index k_group_idx_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0), k_group_idx_(0) { int access_strided = lane_id / 8; int access_contiguous = (lane_id % 8); byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType); pointer_= reinterpret_cast<AccessType const *>(ref.data()); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { pointer_ += offset / kElementsPerAccess; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) * stride_ * kElementsPerAccess + tile_offset.strided() * Shape::kStrided; add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); if (k_group_idx_ & 1) byte_offset_ ^= 0x40; return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator++() { pointer_ += stride_ * InstructionShape::kContiguous; // xor ptr byte_offset_ ^= 0x40; ++k_group_idx_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::Iterations::kStrided; ++s) { int access_idx = c * Policy::Iterations::kStrided + s; AccessType const *source_ptr = pointer_ + Policy::Delta::kContiguous * c * stride_ + Policy::Delta::kStrided * s / kElementsPerAccess; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr); fetch_ptr[access_idx] = *source; } } Element *exchange_ptr = reinterpret_cast<Element *>(&frag); // exchange on 64b granularity only for fragments held in k=8/2 to k=8 CUTLASS_PRAGMA_UNROLL for (int i = Fragment::kElements/2; i < Fragment::kElements; i += 2) { Element tmp = exchange_ptr[i]; exchange_ptr[i] = exchange_ptr[i + 1]; exchange_ptr[i + 1] = tmp; } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * InstructionShape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * Shape::kStrided * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { k_group_idx_ = k_group; } }; } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
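//
// Usage sketch (editorial addition, not part of the original header). Iterators like the ones
// above are normally owned by a warp-level MMA; the names `WarpMma`, `iter_A`, `iter_B`,
// `warp_mma`, `accum`, and `kGroups` below are hypothetical and only illustrate the
// construct / load / advance pattern under that assumption:
//
//   typename WarpMma::IteratorA iter_A(ref_A_shared_memory, lane_id);
//   typename WarpMma::IteratorB iter_B(ref_B_shared_memory, lane_id);
//
//   typename WarpMma::FragmentA frag_A;
//   typename WarpMma::FragmentB frag_B;
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int k = 0; k < kGroups; ++k) {
//     iter_A.load(frag_A);   // each lane fetches its share of the current k-group
//     iter_B.load(frag_B);
//     ++iter_A;              // advance both operand iterators along K
//     ++iter_B;
//     warp_mma(accum, frag_A, frag_B, accum);
//   }
//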
cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h", "repo_id": "cutlass", "token_count": 25875 }
44
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm70.h" #include "cutlass/platform/platform.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Operand identity Operand Operand, /// Data type of A elements typename Element_, /// Layout of operand typename Layout_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) int OpDelta_, /// Number of threads participating in one matrix operation int Threads> class MmaVoltaTensorOpMultiplicandTileIterator; ///////////////////////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand::kA, Element_, cutlass::layout::VoltaTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value>, InstructionShape_, OpDelta_, 32> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kA; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kContiguous % InstructionShape::kContiguous), "Shape of warp-level Mma must be divisible by operator shape."); // Shape of one individual LDS.128 // TODO: 32 and 4 are hardcoded, 32-by-4 is logical shape using LdsShape = layout::PitchLinearShape< 32, 4 >; // LdsShapes are arranged in the strided direction in SMEM using LdsIterations = layout::PitchLinearShape< InstructionShape::kStrided / LdsShape::kStrided, Shape::kContiguous / LdsShape::kContiguous >; }; private: /// Not working on this feature at the moment. static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Number of internal pointers needed to reference shared memory static int const kPointerCount = 2; /// Pointer type used for accesses using AccessType = AlignedArray<Element, Layout::kElementsPerAccess>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads * 2>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_[kPointerCount]; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) { // swizzle patterns for operandA LDS are // 1. (tid[4] << 3) | (tid[2:0] ^ tid[4]) // 2. 
(tid[4] << 3) | (tid[2:0] ^ tid[4] ^ 0b10010) int vec_row = (lane_id >> 4); // tid[4] int vec_col = ((lane_id & 4) >> 2); // tid[2] CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPointerCount; ++i) { if(i == 1) { vec_row |= 2; } int access_contiguous_idx = (vec_col << 2) | ((lane_id & 3) ^ vec_row); int access_contiguous = access_contiguous_idx; int access_strided = vec_row; pointer_[i] = reinterpret_cast<AccessType const *>(ref.data()) + access_contiguous + access_strided * stride_; } } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof(Element); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { int contiguous_offset = tile_offset.contiguous(); int strided_offset = tile_offset.strided(); // To support 32x32 tile size if (Shape::kContiguous == Policy::LdsShape::kContiguous) { if (contiguous_offset % 2) { AccessType const *tmp_pointer = pointer_[0]; pointer_[0] = pointer_[1]; pointer_[1] = tmp_pointer; } contiguous_offset = contiguous_offset / 2 * 2; } int offset = (strided_offset * InstructionShape::kStrided) * stride_ * Layout::kElementsPerAccess + contiguous_offset * Shape::kContiguous; add_pointer_offset(offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator++() { byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) * Layout::kElementsPerAccess; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator--() { byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * Layout::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsIterations::kContiguous; AccessType const *source_ptr = pointer_[s & 1] + Policy::LdsShape::kContiguous * c + Policy::LdsShape::kStrided * (s / 2) * stride_; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr)); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * InstructionShape::kStrided * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; ////////////////////////////////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand::kB, Element_, cutlass::layout::VoltaTensorOpMultiplicandBCongruous< sizeof_bits<Element_>::value>, InstructionShape_, OpDelta_, 32> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kB; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { static_assert( !(Shape::kContiguous % InstructionShape::kContiguous), "Shape of warp-level Mma must be divisible by operator shape."); // Shape of one individual LDS // TODO: remove hardcoded 32 and 4 using LdsShape = layout::PitchLinearShape< 32, 4 >; using LdsIterations = layout::PitchLinearShape< Shape::kContiguous / LdsShape::kContiguous, InstructionShape::kStrided / LdsShape::kStrided >; }; private: /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Pointer type used for accesses using AccessType = AlignedArray<Element, Layout::kElementsPerAccess>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile; holds twice the usual number of registers using Fragment = Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads * 2>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } /// Constructor from TensorRef CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): stride_(ref.stride(0) / Layout::kElementsPerAccess), byte_offset_(0) { // swizzle pattern is (tid & (3 << 3) | (tid[1:0] ^ tid[4:3])) int access_strided = (lane_id >> 3) & 0x3; int access_contiguous = ((lane_id ^ (lane_id >> 3)) & 0x3); pointer_ = reinterpret_cast<AccessType const *>(ref.data()) + access_contiguous + access_strided * stride_; } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof(Element); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { int contiguous_offset = tile_offset.contiguous(); int strided_offset = tile_offset.strided(); int offset = (strided_offset * InstructionShape::kStrided) * stride_ * Layout::kElementsPerAccess + contiguous_offset * Shape::kContiguous; add_pointer_offset(offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator++() { byte_offset_ += stride_ * InstructionShape::kStrided * sizeof(Element) * Layout::kElementsPerAccess; return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator--() { byte_offset_ -= stride_ * InstructionShape::kStrided * sizeof(Element) * Layout::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsIterations::kContiguous; AccessType const *source_ptr = pointer_ + Policy::LdsShape::kContiguous / Layout::kElementsPerAccess * c + Policy::LdsShape::kStrided * s * stride_; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr)); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + tile_offset.strided() * InstructionShape::kStrided * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; ////////////////////////////////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value>, InstructionShape_, OpDelta_, 32> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kA; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaVoltaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element, layout::VoltaTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value>, layout::PitchLinearShape<InstructionShape::kRow, InstructionShape::kColumn>, kOpDelta, kThreads>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): iterator_({ref.data(), ref.stride()}, lane_id) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } ///< advances in units of whole tiles along the logical 
coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. 
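///
/// For this operand-B specialization the logical (row = K, column = N) tile coordinates are
/// transposed onto the underlying pitch-linear iterator, whose contiguous dimension is N.
/// A short sketch of the mapping performed by add_tile_offset (illustrative only; k_tile and
/// n_tile are hypothetical loop indices):
///
///   MatrixCoord tile_offset(k_tile, n_tile);              // logical (row, column) offset
///
///   // forwarded as {contiguous = column, strided = row} to the base iterator
///   iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
///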
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand::kB, Element_, cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous< sizeof_bits<Element_>::value>, InstructionShape_, OpDelta_, 32> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kB; static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaVoltaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element, layout::VoltaTensorOpMultiplicandBCongruous<sizeof_bits<Element_>::value>, layout::PitchLinearShape<InstructionShape::kColumn, InstructionShape::kRow>, kOpDelta, kThreads>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): iterator_({ref.data(), ref.stride()}, lane_id) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const 
&tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It is used to load or store /// accumulators from memory and is agnostic to layout. It could be faster if it assumed row-major /// accumulator layout. 
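///
/// For float accumulators the constructor below spreads the 32 lanes over each interleaved
/// 32x32 tile as sketched here (a restatement of the constructor's index math; the lane-5
/// value is a worked illustration, not additional API):
///
///   int quad         = lane_id >> 2;
///   int lane_in_quad = lane_id & 3;
///
///   int accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
///   int accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials   // = 8
///                 + (lane_in_quad & 2);
///
///   // e.g. lane 5: quad = 1, lane_in_quad = 1  ->  initial offset (accum_m, accum_n) = (9, 0)
///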
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept | /// WriteableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Layout of operand in memory typename Layout_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_> class MmaVoltaTensorOpAccumulatorTileIterator { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { /// Volta Tensor Op uses 32x32 interleaved tile using InterleavedTile = MatrixShape<32, 32>; static_assert(!(Shape::kRow % InterleavedTile::kRow) && !(Shape::kColumn % InterleavedTile::kColumn), "Shape of warp-level Mma must be divisible by operator shape."); static_assert(platform::is_same<TensorCoord, MatrixCoord>::value, "Layouts must be defined for logical MatrixCoord coordinate space."); /// Number of mma operations performed using TileIterations = MatrixShape< Shape::kRow / InterleavedTile::kRow, Shape::kColumn / InterleavedTile::kColumn >; using MmaIterations = MatrixShape<InterleavedTile::kRow / InstructionShape::kM, InterleavedTile::kColumn / InstructionShape::kN>; }; private: // Assume accumulator tile is multipile interleaved 32x32 tile. 
static int const kElementsPerPartial = 4; using EleShapePerPatial = typename platform::conditional< platform::is_same<Element, float>::value, MatrixShape<2, 2>, MatrixShape<1, 4> >::type; static int const kElementsPerMma = 8; static int const kAccumulatorPatials = 2; using QuadShapePerPatialMma = MatrixShape<4, 4>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kCount / kThreads>; private: /// Reference to output tensor TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); int accum_m, accum_n; if (platform::is_same<Element, float>::value) { // (quad[2],quad[0])+lane_in_quad[0] accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1); // (quad[1])+lane_in_quad[1] accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials + (lane_in_quad & 2); } else { accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0]) accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials; } MatrixCoord lane_offset(accum_m, accum_n); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaVoltaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn)); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpAccumulatorTileIterator & operator++() { // deliberate no-op return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpAccumulatorTileIterator & operator--() { // deliberate no-op return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE
  void load(Fragment &frag) const {
    load_with_pointer_offset(frag, 0);
  }

  /// Loads a fragment from memory with additional logical offset
  CUTLASS_HOST_DEVICE
  void load_with_pointer_offset(
    Fragment &frag,                         ///< fragment to load from the tensor
    Index pointer_offset) const {           ///< loads a tile with a linear offset

    TensorRef offset_ref(ref_);
    offset_ref.add_pointer_offset(pointer_offset);

    CUTLASS_PRAGMA_UNROLL
    for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
      CUTLASS_PRAGMA_UNROLL
      for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
        CUTLASS_PRAGMA_UNROLL
        for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
          CUTLASS_PRAGMA_UNROLL
          for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {

            int mma_accum_start =
              (((tile_n * Policy::TileIterations::kRow + tile_m) *
                 Policy::MmaIterations::kColumn + mma_n) *
                 Policy::MmaIterations::kRow + mma_m) *
              kElementsPerMma;

            CUTLASS_PRAGMA_UNROLL
            for (int p = 0; p < kAccumulatorPatials; ++p) {
              CUTLASS_PRAGMA_UNROLL
              for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
                CUTLASS_PRAGMA_UNROLL
                for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
                  int accum_m = tile_m * Policy::InterleavedTile::kRow +
                                mma_m * QuadShapePerPatialMma::kRow + m * 2;
                  int accum_n = tile_n * Policy::InterleavedTile::kColumn +
                                mma_n * QuadShapePerPatialMma::kColumn +
                                p * Policy::InterleavedTile::kColumn / 2 + n;
                  int idx = mma_accum_start + p * kElementsPerPartial +
                            m * EleShapePerPatial::kColumn + n;
                  frag[idx] = offset_ref.at({accum_m, accum_n});
                }
              }
            }
          }
        }
      }
    }
  }

  /// Loads a fragment from memory with additional logical offset
  CUTLASS_DEVICE
  void load_with_byte_offset(
    Fragment &frag,                         ///< fragment to load from the tensor
    Index byte_offset) const {              ///< loads a tile with a linear offset

    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_HOST_DEVICE
  void load(
    Fragment &frag,                         ///< fragment to load from the tensor
    TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles

    load(frag, tile_offset, 0);
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_HOST_DEVICE
  void load(
    Fragment &frag,                         ///< fragment to load from the tensor
    TensorCoord const &tile_offset,         ///< loads a tile with a logical offset in units of whole tiles
    Index pointer_offset) const {           ///< loads a tile with a logical offset AND a pointer offset

    load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
  }

  /// Stores a fragment to memory
  CUTLASS_HOST_DEVICE
  void store(Fragment const &frag) const {
    store_with_pointer_offset(frag, 0);
  }

  /// Stores a fragment to memory with additional pointer offset
  CUTLASS_HOST_DEVICE
  void store_with_pointer_offset(
    Fragment const &frag,                   ///< fragment to store from the tensor
    Index pointer_offset) const {           ///< store a tile with a linear offset

    TensorRef offset_ref(ref_);
    offset_ref.add_pointer_offset(pointer_offset);

    CUTLASS_PRAGMA_UNROLL
    for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
      CUTLASS_PRAGMA_UNROLL
      for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
        CUTLASS_PRAGMA_UNROLL
        for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
          CUTLASS_PRAGMA_UNROLL
          for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {

            int mma_accum_start =
              (((tile_n * Policy::TileIterations::kRow + tile_m) *
                 Policy::MmaIterations::kColumn + mma_n) *
                 Policy::MmaIterations::kRow + mma_m) *
              kElementsPerMma;

            CUTLASS_PRAGMA_UNROLL
            for (int p = 0; p < kAccumulatorPatials; ++p) {
              CUTLASS_PRAGMA_UNROLL
              for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
                CUTLASS_PRAGMA_UNROLL
                for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
                  int accum_m = tile_m * Policy::InterleavedTile::kRow +
                                mma_m * QuadShapePerPatialMma::kRow + m * 2;
                  int accum_n = tile_n * Policy::InterleavedTile::kColumn +
                                mma_n * QuadShapePerPatialMma::kColumn +
                                p * Policy::InterleavedTile::kColumn / 2 + n;
                  int idx = mma_accum_start + p * kElementsPerPartial +
                            m * EleShapePerPatial::kColumn + n;
                  offset_ref.at({accum_m, accum_n}) = frag[idx];
                }
              }
            }
          }
        }
      }
    }
  }

  /// Stores a fragment to memory with additional pointer offset
  CUTLASS_HOST_DEVICE
  void store_with_byte_offset(
    Fragment const &frag,                   ///< fragment to store from the tensor
    Index byte_offset) const {              ///< store a tile with a linear offset

    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
  }

  /// Stores a fragment to memory with logical offset in units of whole tiles.
  CUTLASS_HOST_DEVICE
  void store(
    Fragment const &frag,                   ///< fragment to store to the tensor
    TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles

    store(frag, tile_offset, 0);
  }

  /// Stores a fragment to memory with logical offset in units of whole tiles.
  CUTLASS_HOST_DEVICE
  void store(
    /// fragment to store to the tensor
    Fragment const &frag,
    /// stores a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset,
    /// stores a tile with a logical offset AND a pointer offset
    Index pointer_offset) const {

    store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
  }
};

/// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
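///
/// Unlike the congruous iterators above, this crosswise iterator tracks a k-group index:
/// operator++ flips the swizzled byte offset after every four k-groups, and loads swap the
/// upper and lower 64 bits of each fetched access whenever bit 1 of the k-group index is set.
/// A minimal mainloop sketch (illustrative only; `Iterator`, `ref_smem` and
/// `kWarpGemmIterations` are assumptions):
///
///   Iterator iter(ref_smem, lane_id);
///   typename Iterator::Fragment frag;
///
///   CUTLASS_PRAGMA_UNROLL
///   for (int k = 0; k < kWarpGemmIterations; ++k) {
///     iter.set_kgroup_index(k);    // constant k-group lets the compiler fold the swap test
///     iter.load(frag);
///     ++iter;
///   }
///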
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// KBlock size (in units of elements) int KBlock> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::VoltaTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, KBlock>, InstructionShape_, OpDelta_, 32> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand == Operand::kB, "MmaVoltaTensorOpMultiplicandIterator may only be instantiated for " "A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// KBlock size static int const kKBlock = KBlock; /// Layout of source tile using Layout = cutlass::layout::VoltaTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, kKBlock>; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Long Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection struct Policy { /// Shape of one individual LDS instruction using LdsShape = layout::PitchLinearShape<1, 32>; /// Number and arrangement of LDSM instructions using LdsIterations = layout::PitchLinearShape<1, Shape::kStrided / 32>; /// Using LDS.128 static int const kElementsPerAccess = 8; /// Contiguous elements per line static int const kContiguousElementsPerLine = 4; }; private: /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); /// Pointer type used for accesses using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads * 2>; private: /// Layout object storing stride values StrideIndex stride_; /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; /// Crosswised elements are arranged in a SMEM line /// in units of AccessType Index line_size; /// Internal counter used to determine load addr offset /// and when to swap higher 64bit with lower 64bit int k_group_idx_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator() : pointer_(nullptr), stride_(0), line_size(0), byte_offset_(0), k_group_idx_(0) {} /// Constructor from TensorRef CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) : pointer_(reinterpret_cast<AccessType const *>(ref.data())), stride_(ref.stride(0) * Policy::kElementsPerAccess), line_size((ref.stride(0) * Policy::kContiguousElementsPerLine) / Policy::kElementsPerAccess), k_group_idx_(0), byte_offset_(0) { int quad = (lane_id / 4); int lane_in_quad = (lane_id % 4); int access_contiguous; if(kOperand == Operand::kA) { // swizzle id: tid[4]|tid[1:0]|(tid[2]^tid[4]) access_contiguous = ((quad & 0x4) << 1) + ((lane_in_quad) << 1) + ((quad & 0x1) ^ ((quad & 0x4) >> 2)); } else { // swizzle id: tid[4]|tid[1:0]|tid[3] access_contiguous = ((quad & 0x4) << 1) + (lane_in_quad << 1) + ((quad & 0x2) >> 1 ^ ((quad & 0x4) >> 2)); } byte_offset_ = access_contiguous * sizeof(Element) * Policy::kElementsPerAccess; } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof(Element); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset( TensorCoord const &tile_offset) { int contiguous_offset = tile_offset.contiguous(); int strided_offset = tile_offset.strided(); k_group_idx_ = 0; pointer_ += contiguous_offset * (InstructionShape::kContiguous / Policy::kContiguousElementsPerLine) * line_size + strided_offset * Shape::kStrided / 2; return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator++() { k_group_idx_ = (k_group_idx_ + 1) % 8; if (k_group_idx_ == 4 || k_group_idx_ == 0) { byte_offset_ ^= 1 * sizeof(Element) * Policy::kElementsPerAccess; } pointer_ += line_size; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator--() { assert(0); } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from 
memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { AccessType * fetch_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::LdsIterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsIterations::kContiguous; AccessType const *source_ptr = pointer_ + Policy::LdsShape::kContiguous * c * line_size + Policy::LdsShape::kStrided * s / 2; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; fetch_ptr[access_idx] = *(reinterpret_cast<AccessType const*> (source_byte_ptr)); // swap higher 64bit and lower 64bit if (k_group_idx_ & 0x2) { uint64_t *low = reinterpret_cast<uint64_t *>(&frag) + access_idx * 2; uint64_t *high = reinterpret_cast<uint64_t *>(&frag) + access_idx * 2 + 1; uint64_t tmp = *low; *low = *high; *high = tmp; } } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * InstructionShape::kContiguous / Policy::kElementsPerAccess + tile_offset.strided() * Shape::kStrided * stride_; byte_offset += sizeof(AccessType) * pointer_offset; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { k_group_idx_ = k_group; } }; /// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. 
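///
/// This adapter only reinterprets coordinates; all addressing is delegated to the
/// pitch-linear crosswise iterator above, and the load() overloads taking a TensorCoord tile
/// offset assert at runtime. Callers therefore advance the iterator explicitly, e.g.
/// (illustrative only; `IteratorA`, `ref_A_smem` and `warp_m` are assumptions):
///
///   IteratorA iter_A(ref_A_smem, lane_id);
///   typename IteratorA::Fragment frag_A;
///
///   iter_A.add_tile_offset({warp_m, 0});    // logical (row, column), forwarded unchanged
///   iter_A.load(frag_A);                    // plain load; tile-offset loads are unsupported
///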
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// KBlock size (in units of elements) int KBlock> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, KBlock>, InstructionShape_, OpDelta_, 32> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand == Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for " "A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// KBlock size static int const kKBlock = KBlock; /// Layout of source tile using Layout = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, kKBlock>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaVoltaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element, layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, kKBlock>, layout::PitchLinearShape<InstructionShape::kRow, InstructionShape::kColumn>, kOpDelta, kThreads>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator() {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) : iterator_({ref.data(), ref.stride()}, lane_id) {} /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset( TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator--() { --iterator_; return 
*this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.row(), tile_offset.column())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.contiguous(), tile_offset.strided()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDS to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. 
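///
/// Together with the column-major specialization above, this row-major operand-B adapter is
/// what a warp-level Volta MMA typically uses to walk shared memory along K. A condensed
/// mainloop sketch (illustrative only; `WarpMma`, `iter_A`, `iter_B` and
/// `kWarpGemmIterations` are assumptions standing in for a concrete warp-level configuration,
/// and the operator() signature shown is the conventional CUTLASS warp-level form):
///
///   typename WarpMma::FragmentA frag_A;
///   typename WarpMma::FragmentB frag_B;
///   typename WarpMma::FragmentC accum;
///   accum.clear();
///
///   WarpMma warp_mma;
///
///   CUTLASS_PRAGMA_UNROLL
///   for (int k = 0; k < kWarpGemmIterations; ++k) {
///     iter_A.set_kgroup_index(k);
///     iter_B.set_kgroup_index(k);
///     iter_A.load(frag_A);
///     iter_B.load(frag_B);
///     ++iter_A;
///     ++iter_B;
///     warp_mma(accum, frag_A, frag_B, accum);
///   }
///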
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Identifies A or B multiplicand Operand Operand_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_, /// KBlock size (in units of elements) int KBlock> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand_, Element_, cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, KBlock>, InstructionShape_, OpDelta_, 32> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; static_assert(kOperand == Operand::kA || kOperand == Operand::kB, "MmaTensorOpMultiplicandIterator may only be instantiated for " "A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// KBlock size static int const kKBlock = KBlock; /// Layout of source tile using Layout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, kKBlock>; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Underlying tile iterator implementation using Base = MmaVoltaTensorOpMultiplicandTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element, layout::VoltaTensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, kKBlock>, layout::PitchLinearShape<InstructionShape::kColumn, InstructionShape::kRow>, kOpDelta, kThreads>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator() {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator(TensorRef const &ref, int lane_id) : iterator_({ref.data(), ref.stride()}, lane_id) {} /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &add_tile_offset( TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator--() { --iterator_; return *this; } 
///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for 'TN' arrangement template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Operand identity Operand Operand_, /// Data type of A elements typename Element_, /// Layout of matrix operand typename Layout_, /// Shape of one matrix production operation (concept: MatrixShape) typename InstructionShape_, /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) int OpDelta_, /// Number of threads participating in one matrix operation int Threads = 32, /// Number of partitions along K dimension int PartitionsK_ = 1> class MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; /// Basic check static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaVoltaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Number of elements accessed per Shared Memory load static int const kElementsPerAccess = 4; private: static int const kInterleavedTileRows = 32; static int const kInterleavedTileColumns = 32; static int const kInstructionsPerTile = 2; /// Rounded up instruction counts using TileCount = MatrixShape< Shape::kRow / kInterleavedTileRows, Shape::kColumn / kInterleavedTileColumns >; using FragmentCount = MatrixShape< TileCount::kRow * kInstructionsPerTile, TileCount::kColumn * kInstructionsPerTile >; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array< Element, (kOperand == Operand::kA ? 
FragmentCount::kRow : FragmentCount::kColumn) * kElementsPerAccess >; /// Memory access type using AccessType = AlignedArray<Element, kElementsPerAccess>; private: /// Underlying tensor reference TensorRef ref_; /// Extent of tensor MatrixCoord extent_; /// Origin MatrixCoord origin_; /// Used to conditionally enable extents checking bool divisible_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner(): divisible_(true) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner( TensorRef const &ref, int lane_id ): ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) { int quad_id = lane_id / 4; int lane_in_quad = (lane_id % 4); if (kOperand == Operand::kA) { int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile + lane_in_quad; int col_idx = 0; origin_ = MatrixCoord(row_idx, col_idx); } else { int row_idx = 0; int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile + lane_in_quad; origin_ = MatrixCoord(row_idx, col_idx); } ref_.add_coord_offset(origin_); } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner( TensorRef const &ref, TensorCoord extent, int lane_id ): ref_(ref), extent_(extent), divisible_(false) { int quad_id = lane_id / 4; int lane_in_quad = (lane_id % 4); if (kOperand == Operand::kA) { int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile + lane_in_quad; int col_idx = 0; origin_ = MatrixCoord(row_idx, col_idx); } else { int row_idx = 0; int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile + lane_in_quad; origin_ = MatrixCoord(row_idx, col_idx); } #if defined(__CUDA_ARCH__) __syncthreads(); #endif ref_.add_coord_offset(origin_); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner &add_tile_offset(TensorCoord const &tile_offset) { TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); origin_ += coord_offset; ref_.add_coord_offset(coord_offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator++() { if (kOperand == Operand::kA) { add_tile_offset({0, 1}); } else { add_tile_offset({1, 0}); } return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator--() { if (kOperand == Operand::kA) { add_tile_offset({0, -1}); } else { add_tile_offset({-1, 0}); } return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); AccessType const *access_ptr = reinterpret_cast<AccessType const *>(ref_.data()); int ldm = ref_.stride()[0]; if (kOperand == Operand::kA) { CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < FragmentCount::kRow; ++idx) { int tile_idx = idx / 2; int quad_idx = idx % 2; int row_offset = tile_idx * kInterleavedTileRows + quad_idx * 4; frag_ptr[idx] = access_ptr[row_offset * ldm / kElementsPerAccess]; } } else { CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < FragmentCount::kColumn; ++idx) { int tile_idx = idx / 2; int quad_idx = idx % 2; int col_offset = tile_idx * kInterleavedTileColumns + quad_idx * 4; frag_ptr[idx] = access_ptr[col_offset * ldm / kElementsPerAccess]; } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); load_with_pointer_offset(frag, ref_.offset(coord_offset)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation } }; /// Tile iterator specialized for 'NT' arrangement template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Operand identity Operand Operand_, /// Data type of A elements typename Element_, /// Layout of matrix operand typename Layout_, /// Shape of one matrix production operation (concept: MatrixShape) typename InstructionShape_, /// Delta between *MMA operations (in units of *MMA operations, concept: /// MatrixShape) int OpDelta_, /// Number of threads participating in one matrix operation int Threads = 32, /// Number of partitions along K dimension int PartitionsK_ = 1> class MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand_; /// Basic check static_assert(kOperand == Operand::kA || kOperand== Operand::kB, "MmaVoltaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) static int const kOpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Number of elements accessed per Shared Memory load static int const kElementsPerAccess = 4; private: static int const kInterleavedTileRows = 32; static int const kInterleavedTileColumns = 32; static int const kInstructionsPerTile = 2; /// Rounded up instruction counts using TileCount = MatrixShape< Shape::kRow / kInterleavedTileRows, Shape::kColumn / kInterleavedTileColumns >; using FragmentCount = MatrixShape< TileCount::kRow * kInstructionsPerTile, TileCount::kColumn * kInstructionsPerTile >; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array< Element, (kOperand == Operand::kA ? 
FragmentCount::kRow : FragmentCount::kColumn) * kElementsPerAccess >; /// Memory access type using AccessType = AlignedArray<Element, kElementsPerAccess>; private: /// Underlying tensor reference TensorRef ref_; /// Extent of tensor MatrixCoord extent_; /// Origin MatrixCoord origin_; /// Used to conditionally enable extents checking bool divisible_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter(): divisible_(true) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter( TensorRef const &ref, int lane_id ): ref_(ref), extent_(Shape::kRow, Shape::kColumn), divisible_(true) { int quad_id = lane_id / 4; int lane_in_quad = (lane_id % 4); if (kOperand == Operand::kA) { int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile; int col_idx = lane_in_quad; origin_ = MatrixCoord(row_idx, col_idx); } else { int row_idx = lane_in_quad; int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile; origin_ = MatrixCoord(row_idx, col_idx); } ref_.add_coord_offset(origin_); } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter( TensorRef const &ref, TensorCoord extent, int lane_id ): ref_(ref), extent_(extent), divisible_(false) { int quad_id = lane_id / 4; int lane_in_quad = (lane_id % 4); if (kOperand == Operand::kA) { int row_idx = ((quad_id & 1) + ((quad_id & 4) / 2)) * 4 * kInstructionsPerTile; int col_idx = lane_in_quad; origin_ = MatrixCoord(row_idx, col_idx); } else { int row_idx = lane_in_quad; int col_idx = (quad_id / 2) * 4 * kInstructionsPerTile; origin_ = MatrixCoord(row_idx, col_idx); } #if defined(__CUDA_ARCH__) __syncthreads(); #endif ref_.add_coord_offset(origin_); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter &add_tile_offset(TensorCoord const &tile_offset) { TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); origin_ += coord_offset; ref_.add_coord_offset(coord_offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator++() { if (kOperand == Operand::kA) { add_tile_offset({0, 1}); } else { add_tile_offset({1, 0}); } return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator--() { if (kOperand == Operand::kA) { add_tile_offset({0, -1}); } else { add_tile_offset({-1, 0}); } return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); AccessType const *access_ptr = reinterpret_cast<AccessType const *>(ref_.data()); int ldm = ref_.stride()[0]; if (kOperand == Operand::kA) { CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < FragmentCount::kRow; ++idx) { int tile_idx = idx / 2; int quad_idx = idx % 2; int row_offset = tile_idx * kInterleavedTileRows; frag_ptr[idx] = access_ptr[row_offset / kElementsPerAccess + quad_idx]; } } else { CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < FragmentCount::kColumn; ++idx) { int tile_idx = idx / 2; int quad_idx = idx % 2; int col_offset = tile_idx * kInterleavedTileColumns; frag_ptr[idx] = access_ptr[col_offset / kElementsPerAccess + quad_idx]; } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { load_with_pointer_offset(frag, byte_offset * 8 / sizeof_bits<Element>::value); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); load_with_pointer_offset(frag, ref_.offset(coord_offset)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); load_with_pointer_offset(frag, ref_.offset(coord_offset) + pointer_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { TensorCoord coord_offset(tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn); load_with_pointer_offset(frag, ref_.offset(coord_offset) + byte_offset * 8 / sizeof_bits<Element>::value); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_, 32 > : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner< Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { public: using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner< Shape_, Operand::kA, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> ; using TensorRef = typename Base::TensorRef; /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): Base(ref, lane_id) { } }; template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_, 32 > : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter< Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> { public: using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter< Shape_, Operand::kA, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> ; using TensorRef = typename Base::TensorRef; /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): Base(ref, lane_id) { } }; template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_, 32 > : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner< Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_> { public: using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalInner< Shape_, Operand::kB, Element_, cutlass::layout::ColumnMajor, InstructionShape_, OpDelta_>; using TensorRef = typename Base::TensorRef; /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): Base(ref, lane_id) { } }; template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions) int OpDelta_> class MmaVoltaTensorOpMultiplicandTileIterator< Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, 
InstructionShape_, OpDelta_, 32 > : public MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter< Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> { public: using Base = MmaVoltaTensorOpMultiplicandTileIteratorCanonicalOuter< Shape_, Operand::kB, Element_, cutlass::layout::RowMajor, InstructionShape_, OpDelta_>; using TensorRef = typename Base::TensorRef; /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaVoltaTensorOpMultiplicandTileIterator( TensorRef const &ref, int lane_id ): Base(ref, lane_id) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
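For orientation, the sketch below shows how the warp-level iterators defined above are typically driven from a warp-level MMA mainloop. It is illustrative only: `WarpIterator` stands for one of the `MmaVoltaTensorOpMultiplicandTileIterator` specializations, and `ref`, `lane_id`, and `k_tiles` are assumed to be supplied by the surrounding warp-level MMA code; none of this is part of the header itself.

// Hypothetical driver for one operand; everything except the iterator's own
// member functions (constructor, load, operator++) is a placeholder.
template <typename WarpIterator>
CUTLASS_DEVICE void warp_iterator_sketch(
    typename WarpIterator::TensorRef ref,   // operand tile in shared memory
    int lane_id,                            // lane index within the warp
    int k_tiles) {                          // number of whole tiles to traverse

  WarpIterator iter(ref, lane_id);          // each lane derives its origin from lane_id
  typename WarpIterator::Fragment frag;

  for (int k = 0; k < k_tiles; ++k) {
    iter.load(frag);                        // load this lane's portion of the current tile
    ++iter;                                 // advance one whole tile along the advance dimension
    // ... feed `frag` to the warp-level MMA together with the other operand ...
  }
}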
cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h", "repo_id": "cutlass", "token_count": 35217 }
45
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/detail/dependent_false.hpp" #include "cute/numeric/integral_constant.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/barrier.h" #include "cute/util/type_traits.hpp" #include "cute/container/array.hpp" //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////////////////////////// using namespace cute; enum class BarrierStatus : uint32_t { WaitAgain = 0u, WaitDone = 1u, WaitOnly = 2u }; class ArrivalToken { public: CUTLASS_HOST_DEVICE ArrivalToken(BarrierStatus barrier_status) : barrier_status_(barrier_status) {} CUTLASS_HOST_DEVICE ArrivalToken() = delete; CUTLASS_HOST_DEVICE BarrierStatus get() const { return barrier_status_;; } CUTLASS_HOST_DEVICE bool operator==(ArrivalToken const& other) const { return barrier_status_ == other.get(); } private: BarrierStatus barrier_status_; CUTLASS_HOST_DEVICE friend bool operator==(const ArrivalToken& left, const BarrierStatus& right) { return left.get() == right; } CUTLASS_HOST_DEVICE friend bool operator==(const BarrierStatus& left, const ArrivalToken& right) { return left == right.get(); } CUTLASS_HOST_DEVICE friend bool operator!=(const ArrivalToken& left, const BarrierStatus& right) { return left.get() != right; } CUTLASS_HOST_DEVICE friend bool operator!=(const BarrierStatus& left, const ArrivalToken& right) { return left != right.get(); } }; class ProducerToken : public ArrivalToken { using ArrivalToken::ArrivalToken; }; class ConsumerToken : public ArrivalToken { using ArrivalToken::ArrivalToken; }; // Circular Buffer Index + Associated Phase // Assumes only one 
operation possible - i.e., ++ template<uint32_t Stages_> struct PipelineState { static constexpr uint32_t Stages = Stages_; int index_ = 0; uint32_t phase_ = 0; uint32_t count_ = 0; CUTLASS_DEVICE PipelineState(): index_{}, phase_{}, count_{} {} CUTLASS_DEVICE PipelineState(int index, uint32_t phase, uint32_t count) : index_(index) , phase_(phase) , count_(count) {} CUTLASS_DEVICE int index() const { return index_; } CUTLASS_DEVICE uint32_t phase() const { return phase_; } CUTLASS_DEVICE uint32_t count() const { return count_; } CUTLASS_DEVICE void operator++() { if constexpr (Stages > 0) { ++index_; ++count_; if (index_ == Stages) { index_ = 0; phase_ ^= 1; } } } CUTLASS_DEVICE PipelineState& operator+=(uint32_t num_iterations) { return advance(num_iterations); } CUTLASS_DEVICE PipelineState& operator=(PipelineState const& other) { index_ = other.index(); phase_ = other.phase(); count_ = other.count(); return *this; } CUTLASS_DEVICE PipelineState& advance(uint32_t num_iterations) { if constexpr (Stages > 0) { // Number of iterations cross over the stage boundary => flipped phase if ((num_iterations < Stages) && (index_ + num_iterations) >= Stages ) { phase_ ^= 1; } // How many times number of iterations cross over the stage boundary and // end up on a odd number => flipped phase if ((num_iterations >= Stages) && (((index_ + num_iterations) / Stages) % 2) == 1) { phase_ ^= 1; } index_ = (index_ + num_iterations) % Stages; count_ += num_iterations; } return *this; } CUTLASS_DEVICE static PipelineState make_pipeline_state(PipelineState start_state, uint32_t num_iterations) { return start_state.advance(num_iterations); } }; template<class Pipeline> CUTLASS_DEVICE PipelineState<Pipeline::Stages> make_producer_start_state() { // Producer starts with an opposite phase as the buffers are initially empty constexpr int InitialProducerStage = 0; constexpr uint32_t InitialProducerPhase = 1; constexpr uint32_t InitialProducerCount = 0; return {InitialProducerStage, InitialProducerPhase, InitialProducerCount}; } /////////////////////////////////////////////////////////////////////////////////////////////////// // // TMA load (producer) Async Pipeline class // /////////////////////////////////////////////////////////////////////////////////////////////////// // Assumptions : Constructor is visible Cluster-wide (as it needs a Cluster-Sync) // We have exactly one thread elected in the Producer as the "leader" // Currently, it is optional to elect a leader for the Consumers template <int Stages_> class PipelineTmaAsync { public : using FullBarrier = cutlass::arch::ClusterTransactionBarrier; using EmptyBarrier = cutlass::arch::ClusterBarrier; using ProducerBarrierType = FullBarrier::ValueType; using ConsumerBarrierType = EmptyBarrier::ValueType; static constexpr uint32_t Stages = Stages_; using PipelineState = cutlass::PipelineState<Stages>; struct SharedStorage { FullBarrier full_barrier_[Stages]; EmptyBarrier empty_barrier_[Stages]; }; enum class ThreadCategory { NonParticipant, Producer, Consumer, ProducerConsumer }; struct Params { uint32_t transaction_bytes = 0; ThreadCategory role = ThreadCategory::NonParticipant; uint32_t is_leader = 0; uint32_t num_consumers = 0; }; // Constructor template<typename ClusterShape> CUTLASS_DEVICE PipelineTmaAsync(SharedStorage& storage, Params params, ClusterShape cluster_shape) : params_(params) , full_barrier_ptr_(&storage.full_barrier_[0]) , empty_barrier_ptr_(&storage.empty_barrier_[0]) { int warp_idx = canonical_warp_idx(); int lane_predicate = 
cute::elect_one_sync(); if (warp_idx == 0 && lane_predicate == 1) { // Barrier FULL init for (int i = 0; i < Stages; ++i) { full_barrier_ptr_[i].init(1); } uint32_t const num_consumer_warpgroups_per_cluster = params_.num_consumers / NumThreadsPerWarpGroup; uint32_t const multicast_consumer_arrival_count = (cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1) * num_consumer_warpgroups_per_cluster; // Barrier EMPTY init for (int i = 0; i < Stages; ++i) { empty_barrier_ptr_[i].init(multicast_consumer_arrival_count); } } cutlass::arch::fence_barrier_init(); // Logic to optimally schedule Empty Arrives // Goal : To divide SYNCS Empty Arrival duty equally amongst the Warp-Group (128 threads) dim3 block_id = cute::block_id_in_cluster(); auto cluster_size = cute::size(cluster_shape); static constexpr int MaxClusterSize = 16; // STEP 1 : Use Cute Layout function to generate an optimal dst block-id (0-15) if (params_.num_consumers % NumThreadsPerWarpGroup == 0) { int thread_idx = threadIdx.x % NumThreadsPerWarpGroup; is_signalling_thread_ = (thread_idx % (NumThreadsPerWarpGroup / MaxClusterSize)) == 0; auto layout = cute::composition(Swizzle<2,0,-2>{}, Layout<Shape<_4,_4>,Stride<_4,_1>>{}); uint32_t thread_row = warp_idx % 4; uint32_t thread_col = (thread_idx / 8) % 4; dst_blockid_ = layout(thread_row, thread_col); } else if (params_.num_consumers == 32) { int thread_idx = threadIdx.x % 32; is_signalling_thread_ = (thread_idx % (32 / MaxClusterSize)) == 0; auto layout = Layout<Shape<_4,_4>,Stride<_4, _1>>{}; uint32_t thread_row = thread_idx / 8; uint32_t thread_col = (thread_idx % 8) / 2; dst_blockid_ = layout(thread_row, thread_col); } else { is_signalling_thread_ = 0; #ifndef NDEBUG asm volatile ("brkpt;\n" ::); #endif } // STEP 2: Find if this dst block-id needs an arrival for this problem is_signalling_thread_ &= dst_blockid_ < cluster_size; is_signalling_thread_ &= is_same_row_or_col(dst_blockid_, block_id, cluster_shape); } template <typename ClusterShape> CUTLASS_DEVICE bool is_same_row_or_col(int dst_block_id, dim3 block_id, ClusterShape cluster_shape) { return (((dst_block_id % cute::size<0>(cluster_shape)) == block_id.x) || ( ((dst_block_id / cute::size<0>(cluster_shape)) == block_id.y) )); } //////////////////// // Producer APIs //////////////////// // Four member functions are always used in pairs: // // * producer_try_acquire and producer_acquire, and // * consumer_try_wait and consumer_wait. // // The two functions with "try" in their names are called "try" functions, // and the other two are conceptually "finalize" functions. // The "try" function in each pair starts the process of waiting on the barrier to flip. // It opportunistically waits for an implementation-dependent timeout. // Whether or not the barrier has flipped yet, the try function will return a token. // If the token indicates that the barrier has not flipped, // then the token must be passed into the corresponding "finalize" function. // The finalize function will then block until the barrier has flipped. // If the token indicates that the barrier _has_ flipped, // then it is still correct to pass it into the finalize function. // The finalize function will return immediately in that case. 
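// Illustrative usage of the try/finalize pairs described above (sketch only;
// `pipeline` and `state` are assumed to exist in the calling kernel):
//
//   ProducerToken ptok = pipeline.producer_try_acquire(state);
//   /* ... do work that does not touch the stage's buffer ... */
//   pipeline.producer_acquire(state, ptok);   // blocks only if the barrier has not flipped yet
//
//   ConsumerToken ctok = pipeline.consumer_try_wait(state);
//   /* ... do work that does not touch the stage's buffer ... */
//   pipeline.consumer_wait(state, ctok);      // returns immediately if the try already succeeded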
CUTLASS_DEVICE ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) { return producer_try_acquire(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) { producer_acquire(state.index(), state.phase(), barrier_token); } CUTLASS_DEVICE void producer_commit(PipelineState state, uint32_t bytes) { producer_commit(state.index(), bytes); } // Prevents early exit of producer blocks in Cluster. // This should be called once before kernel exits. CUTLASS_DEVICE void producer_tail(PipelineState state) { for (int count = 0; count < Stages; ++count) { producer_acquire(state, {BarrierStatus::WaitOnly}); ++state; } } CUTLASS_DEVICE ProducerBarrierType* producer_get_barrier(PipelineState state) { return producer_get_barrier(state.index()); } //////////////////// // Consumer APIs //////////////////// CUTLASS_DEVICE ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) { return consumer_try_wait(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) { return consumer_test_wait(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE void consumer_wait(PipelineState state) { consumer_wait(state.index(), state.phase()); } CUTLASS_DEVICE void consumer_wait(PipelineState state, ConsumerToken barrier_token) { consumer_wait(state.index(), state.phase(), barrier_token); } CUTLASS_DEVICE void consumer_release(PipelineState state) { consumer_release(state.index()); } private : uint32_t dst_blockid_ = 0; uint32_t is_signalling_thread_ = 0; FullBarrier *full_barrier_ptr_ = nullptr; EmptyBarrier *empty_barrier_ptr_ = nullptr; Params params_; CUTLASS_DEVICE ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } CUTLASS_DEVICE void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) { if (barrier_token != BarrierStatus::WaitDone) { empty_barrier_ptr_[stage].wait(phase); } if (barrier_token == BarrierStatus::WaitOnly) { return; } if (params_.is_leader) { full_barrier_ptr_[stage].arrive_and_expect_tx(params_.transaction_bytes); } #ifndef NDEBUG if (params_.role == ThreadCategory::Consumer || params_.role == ThreadCategory::NonParticipant) { asm volatile ("brkpt;\n" ::); } // Most likely you have elected more than one leader if (params_.is_leader && (threadIdx.x % 32 != 0)) { asm volatile ("brkpt;\n" ::); } #endif } // NOP for TMA based mainloop CUTLASS_DEVICE void producer_commit(uint32_t stage, uint32_t bytes) { // Below code is used only for unit-testing (in the absence of TMA commit) #if CUTLASS_UNIT_TEST_PIPELINE if (params_.is_leader) { // STEP 1 : Commit to self full_barrier_ptr_[stage].complete_transaction(bytes); // STEP 2 : Commit to other blocks in our cluster auto cluster_shape = cute::cluster_shape(); Layout block_layout_in_cluster = make_layout(cluster_shape); dim3 local_block_id = cute::block_id_in_cluster(); CUTLASS_PRAGMA_UNROLL for(int n = 0; n < size<1>(block_layout_in_cluster); ++n) { uint32_t dst_block_id = block_layout_in_cluster(local_block_id.x,n,Int<0>{}); full_barrier_ptr_[stage].complete_transaction(dst_block_id, bytes, n!=local_block_id.y); } CUTLASS_PRAGMA_UNROLL for(int m = 0; m < size<0>(block_layout_in_cluster); 
++m) { uint32_t dst_block_id = block_layout_in_cluster(m,local_block_id.y,Int<0>{}); full_barrier_ptr_[stage].complete_transaction(dst_block_id, bytes, m!=local_block_id.x); } } #endif } CUTLASS_DEVICE ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } CUTLASS_DEVICE ConsumerToken consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t barrier_status = full_barrier_ptr_[stage].test_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } // Wait for producer to commit transactions (done by TMA) CUTLASS_DEVICE void consumer_wait(uint32_t stage, uint32_t phase) { full_barrier_ptr_[stage].wait(phase); } // Wait for producer to commit transactions (done by TMA) CUTLASS_DEVICE void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) { if (barrier_token == BarrierStatus::WaitAgain) { full_barrier_ptr_[stage].wait(phase); } } // Consumer signalling Producer of completion // Ensures all blocks in the Same Row and Column get notifed. CUTLASS_DEVICE void consumer_release(uint32_t stage, uint32_t skip = false) { empty_barrier_ptr_[stage].arrive(dst_blockid_, is_signalling_thread_ & (!skip)); #ifndef NDEBUG if (params_.role == ThreadCategory::Producer || params_.role == ThreadCategory::NonParticipant) { asm volatile ("brkpt;\n" ::); } #endif } CUTLASS_DEVICE ProducerBarrierType* producer_get_barrier(uint32_t stage) { return reinterpret_cast<ProducerBarrierType*>(&full_barrier_ptr_[stage]); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // TMA store pipeline class // producer-only class, no async barriers between threads because consumer is TMA unit // /////////////////////////////////////////////////////////////////////////////////////////////////// template < int Stages_, // The number of committed TMA store batches that can be in flight upon return of producer acquire int UnacquiredStages_ = Stages_-1 > class PipelineTmaStore { public: static constexpr uint32_t Stages = Stages_; static_assert(Stages_ > 0); static_assert(UnacquiredStages_ >= 0); static constexpr uint32_t UnacquiredStages = static_cast<uint32_t>(UnacquiredStages_); using PipelineState = cutlass::PipelineState<Stages>; struct Params { bool always_wait = false; }; CUTLASS_DEVICE PipelineTmaStore(Params params = {}) : params_(params) {} //////////////////// // Producer APIs //////////////////// // Wait for the least recently committed batch of TMA stores to complete CUTLASS_DEVICE void producer_acquire(PipelineState state) { producer_acquire(state.index(), state.count()); } // Commit the most recently issued batch of TMA stores CUTLASS_DEVICE void producer_commit(PipelineState state) { producer_commit(state.index(), state.count()); } // Wait for all TMA stores to complete CUTLASS_DEVICE void producer_tail([[maybe_unused]] PipelineState state) { tma_store_wait<0>(); } private: Params params_; // Wait for the least recently committed batch of TMA stores to complete // or until at most UnacquiredStages TMA store batches are in-flight (if specified) CUTLASS_DEVICE void producer_acquire([[maybe_unused]] uint32_t stage, uint32_t count) { if (params_.always_wait || count > UnacquiredStages) { tma_store_wait<UnacquiredStages>(); } } // Commit the most recently issued 
batch of TMA stores CUTLASS_DEVICE void producer_commit([[maybe_unused]] uint32_t stage, [[maybe_unused]] uint32_t count) { tma_store_arrive(); } }; template <> class PipelineTmaStore< /* Stages_ = */ 0, /* UnacquiredStages = Stages_ - 1 = */ -1 > { public: static constexpr uint32_t Stages = 0; static constexpr uint32_t UnacquiredStages = 0; using PipelineState = cutlass::PipelineState<Stages>; struct Params { bool always_wait = false; }; PipelineTmaStore() = default; CUTLASS_DEVICE PipelineTmaStore(Params params) : params_(params) {} //////////////////// // Producer APIs //////////////////// template<class ThisTemplateParameterExistsOnlyForDependentFalse = int> CUTLASS_DEVICE void producer_acquire(PipelineState /* state */, ThisTemplateParameterExistsOnlyForDependentFalse* /* unused */ = nullptr) { static_assert(cutlass::detail::dependent_false<ThisTemplateParameterExistsOnlyForDependentFalse>, "It is never valid to call PipelineTmaStore<0>::producer_acquire"); } // Commit the most recently issued batch of TMA stores CUTLASS_DEVICE void producer_commit(PipelineState state) { producer_commit(state.index(), state.count()); } // Wait for all TMA stores to complete CUTLASS_DEVICE void producer_tail([[maybe_unused]] PipelineState state) { tma_store_wait<0>(); } private: Params params_; // Commit the most recently issued batch of TMA stores CUTLASS_DEVICE void producer_commit([[maybe_unused]] uint32_t stage, [[maybe_unused]] uint32_t count) { tma_store_arrive(); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // Simple producer-consumer async Pipeline class using producer transaction barriers // /////////////////////////////////////////////////////////////////////////////////////////////////// template <int Stages_> class PipelineTransactionAsync { public : using FullBarrier = cutlass::arch::ClusterTransactionBarrier; using EmptyBarrier = cutlass::arch::ClusterBarrier; using ProducerBarrierType = FullBarrier::ValueType; using ConsumerBarrierType = EmptyBarrier::ValueType; static constexpr uint32_t Stages = Stages_; using PipelineState = cutlass::PipelineState<Stages>; struct SharedStorage { cute::array<FullBarrier, Stages> full_barrier_; cute::array<EmptyBarrier, Stages> empty_barrier_; }; enum class ThreadCategory { NonParticipant, Producer, Consumer, ProducerConsumer }; struct Params { ThreadCategory role = ThreadCategory::NonParticipant; uint32_t transaction_bytes = 0; uint32_t producer_arv_count = 1; uint32_t consumer_arv_count = 1; uint32_t dst_blockid = cute::block_rank_in_cluster(); }; // Constructor CUTLASS_DEVICE PipelineTransactionAsync(SharedStorage& storage, Params const& params) : params_(params) , full_barrier_ptr_(storage.full_barrier_.data()) , empty_barrier_ptr_(storage.empty_barrier_.data()) { int warp_idx = canonical_warp_idx(); int lane_predicate = cute::elect_one_sync(); // Barrier FULL, EMPTY init // Init is done only by thread 0 of the block if (warp_idx == 0 && lane_predicate == 1) { for (int i = 0; i < Stages; ++i) { full_barrier_ptr_[i].init(params.producer_arv_count); empty_barrier_ptr_[i].init(params.consumer_arv_count); } } cutlass::arch::fence_barrier_init(); } //////////////////// // Producer APIs //////////////////// // Four member functions are always used in pairs: // // * producer_try_acquire and producer_acquire, and // * consumer_try_wait and consumer_wait. // // The two functions with "try" in their names are called "try" functions, // and the other two are conceptually "finalize" functions. 
// The "try" function in each pair starts the process of waiting on the barrier to flip. // It opportunistically waits for an implementation-dependent timeout. // Whether or not the barrier has flipped yet, the try function will return a token. // If the token indicates that the barrier has not flipped, // then the token must be passed into the corresponding "finalize" function. // The finalize function will then block until the barrier has flipped. // If the token indicates that the barrier _has_ flipped, // then it is still correct to pass it into the finalize function. // The finalize function will return immediately in that case. CUTLASS_DEVICE ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) { return producer_try_acquire(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) { producer_acquire(state.index(), state.phase(), barrier_token); } // Perform an expect-tx operation on the stage's full barrier. Must be called by 1 thread CUTLASS_DEVICE void producer_expect_transaction(PipelineState state) { producer_expect_transaction(state.index()); } CUTLASS_DEVICE void producer_commit(PipelineState state) { producer_commit(state.index()); } // Prevents early exit of producer blocks in Cluster. // This should be called once before kernel exits. CUTLASS_DEVICE void producer_tail(PipelineState state) { for (int count = 0; count < Stages; ++count) { producer_acquire(state); ++state; } } CUTLASS_DEVICE ProducerBarrierType* producer_get_barrier(PipelineState state) { return producer_get_barrier(state.index()); } //////////////////// // Consumer APIs //////////////////// CUTLASS_DEVICE ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) { return consumer_try_wait(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) { return consumer_test_wait(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE void consumer_wait(PipelineState state, ConsumerToken barrier_token = {BarrierStatus::WaitAgain}) { consumer_wait(state.index(), state.phase(), barrier_token); } CUTLASS_DEVICE void consumer_release(PipelineState state) { consumer_release(state.index()); } private: FullBarrier *full_barrier_ptr_ = nullptr; EmptyBarrier *empty_barrier_ptr_ = nullptr; Params params_; CUTLASS_DEVICE ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } CUTLASS_DEVICE void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) { if (barrier_token == BarrierStatus::WaitAgain) { empty_barrier_ptr_[stage].wait(phase); } } // Perform an expect-tx operation on the stage's full barrier. 
Must be called by 1 thread CUTLASS_DEVICE void producer_expect_transaction(uint32_t stage) { full_barrier_ptr_[stage].expect_transaction(params_.transaction_bytes); } CUTLASS_DEVICE void producer_commit(uint32_t stage) { full_barrier_ptr_[stage].arrive(params_.dst_blockid); } CUTLASS_DEVICE ProducerBarrierType* producer_get_barrier(uint32_t stage) { return reinterpret_cast<ProducerBarrierType*>(&full_barrier_ptr_[stage]); } CUTLASS_DEVICE ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } CUTLASS_DEVICE ConsumerToken consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t barrier_status = full_barrier_ptr_[stage].test_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } CUTLASS_DEVICE void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) { if (barrier_token == BarrierStatus::WaitAgain) { full_barrier_ptr_[stage].wait(phase); } } CUTLASS_DEVICE void consumer_release(uint32_t stage, uint32_t skip = false) { empty_barrier_ptr_[stage].arrive(params_.dst_blockid, (not skip)); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // Simple producer-consumer async Pipeline class // /////////////////////////////////////////////////////////////////////////////////////////////////// template <int Stages_> class PipelineAsync { public : using FullBarrier = cutlass::arch::ClusterBarrier; using EmptyBarrier = cutlass::arch::ClusterBarrier; using ProducerBarrierType = FullBarrier::ValueType; using ConsumerBarrierType = EmptyBarrier::ValueType; static constexpr uint32_t Stages = Stages_; using PipelineState = cutlass::PipelineState<Stages>; struct SharedStorage { FullBarrier full_barrier_[Stages]; EmptyBarrier empty_barrier_[Stages]; }; enum class ThreadCategory { NonParticipant, Producer, Consumer, ProducerConsumer }; struct Params { ThreadCategory role = ThreadCategory::NonParticipant; uint32_t producer_arv_count = 1; uint32_t consumer_arv_count = 1; uint32_t dst_blockid = cute::block_rank_in_cluster(); }; // Default assumption when only storage is passed is : // => single producer, single consumer & they are in the same block (within the Cluster) CUTLASS_DEVICE PipelineAsync(SharedStorage& storage) : PipelineAsync(storage, {}) {} CUTLASS_DEVICE PipelineAsync( SharedStorage& storage, Params const& params) : params_(params), full_barrier_ptr_(&storage.full_barrier_[0]), empty_barrier_ptr_(&storage.empty_barrier_[0]) { int warp_idx = canonical_warp_idx(); int lane_predicate = cute::elect_one_sync(); // Barrier FULL, EMPTY init // Init is done only by thread 0 of the block if (warp_idx == 0 && lane_predicate == 1) { for (int i = 0; i < Stages; ++i) { full_barrier_ptr_[i].init(params.producer_arv_count); empty_barrier_ptr_[i].init(params.consumer_arv_count); } } cutlass::arch::fence_barrier_init(); } //////////////////// // Producer APIs //////////////////// // Four member functions are always used in pairs: // // * producer_try_acquire and producer_acquire, and // * consumer_try_wait and consumer_wait. // // The two functions with "try" in their names are called "try" functions, // and the other two are conceptually "finalize" functions. // The "try" function in each pair starts the process of waiting on the barrier to flip. 
// It opportunistically waits for an implementation-dependent timeout. // Whether or not the barrier has flipped yet, the try function will return a token. // If the token indicates that the barrier has not flipped, // then the token must be passed into the corresponding "finalize" function. // The finalize function will then block until the barrier has flipped. // If the token indicates that the barrier _has_ flipped, // then it is still correct to pass it into the finalize function. // The finalize function will return immediately in that case. CUTLASS_DEVICE ProducerToken producer_try_acquire(PipelineState state, uint32_t skip_wait = false) { return producer_try_acquire(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE void producer_acquire(PipelineState state, ProducerToken barrier_token = {BarrierStatus::WaitAgain}) { producer_acquire(state.index(), state.phase(), barrier_token); } CUTLASS_DEVICE void producer_commit(PipelineState state) { producer_commit(state.index()); } template<class UserDefinedArriveOp> CUTLASS_DEVICE void producer_commit(PipelineState state, UserDefinedArriveOp&& user_defined_arrive_op) { cute::forward<UserDefinedArriveOp>(user_defined_arrive_op)(producer_get_barrier(state.index())); producer_commit(state); } // Prevents early exit of producer blocks in Cluster. // This should be called once before kernel exits. CUTLASS_DEVICE void producer_tail(PipelineState state) { for (int count = 0; count < Stages; ++count) { producer_acquire(state); ++state; } } CUTLASS_DEVICE ProducerBarrierType* producer_get_barrier(PipelineState state) { return producer_get_barrier(state.index()); } //////////////////// // Consumer APIs //////////////////// CUTLASS_DEVICE ConsumerToken consumer_try_wait(PipelineState state, uint32_t skip_wait = false) { return consumer_try_wait(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE ConsumerToken consumer_test_wait(PipelineState state, uint32_t skip_wait = false) { return consumer_test_wait(state.index(), state.phase(), skip_wait); } CUTLASS_DEVICE void consumer_wait(PipelineState state, ConsumerToken barrier_token = {BarrierStatus::WaitAgain}) { consumer_wait(state.index(), state.phase(), barrier_token); } CUTLASS_DEVICE void consumer_release(PipelineState state) { consumer_release(state.index()); } private: Params params_; FullBarrier *full_barrier_ptr_; EmptyBarrier *empty_barrier_ptr_; CUTLASS_DEVICE ProducerToken producer_try_acquire(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t barrier_status = empty_barrier_ptr_[stage].try_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } CUTLASS_DEVICE void producer_acquire(uint32_t stage, uint32_t phase, ProducerToken barrier_token) { if (barrier_token == BarrierStatus::WaitAgain) { empty_barrier_ptr_[stage].wait(phase); } } CUTLASS_DEVICE void producer_commit(uint32_t stage) { full_barrier_ptr_[stage].arrive(); } CUTLASS_DEVICE ProducerBarrierType* producer_get_barrier(uint32_t stage) { return reinterpret_cast<ProducerBarrierType*>(&full_barrier_ptr_[stage]); } CUTLASS_DEVICE ConsumerToken consumer_try_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t barrier_status = full_barrier_ptr_[stage].try_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } CUTLASS_DEVICE ConsumerToken consumer_test_wait(uint32_t stage, uint32_t phase, uint32_t skip_wait) { if (skip_wait) { return {BarrierStatus::WaitDone}; } uint32_t 
barrier_status = full_barrier_ptr_[stage].test_wait(phase); return {static_cast<BarrierStatus>(barrier_status)}; } CUTLASS_DEVICE void consumer_wait(uint32_t stage, uint32_t phase) { uint32_t done = full_barrier_ptr_[stage].test_wait(phase); if (!done) { full_barrier_ptr_[stage].wait(phase); } } CUTLASS_DEVICE void consumer_wait(uint32_t stage, uint32_t phase, ConsumerToken barrier_token) { if (barrier_token == BarrierStatus::WaitAgain) { full_barrier_ptr_[stage].wait(phase); } } CUTLASS_DEVICE void consumer_release(uint32_t stage) { empty_barrier_ptr_[stage].arrive(params_.dst_blockid); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // Barrier to ensure an Ordered Sequence between // SequenceLength number of groups (each with group_size participants) executing SequenceDepth Stages // i.e., for all i < j - only after id "i" arrives at a particular stage "m" // will the wait() for id "j" succeed for the same stage // /////////////////////////////////////////////////////////////////////////////////////////////////// template<int SequenceDepth, int SequenceLength> class OrderedSequenceBarrier { public : using Barrier = cutlass::arch::ClusterBarrier; struct SharedStorage { Barrier barrier_[SequenceDepth][SequenceLength]; }; struct Params { uint32_t group_id; uint32_t group_size; }; private : // In future this Params object can be replaced easily with a CG object Params params_; Barrier *barrier_ptr_; PipelineState<SequenceDepth> stage_; static constexpr int Depth = SequenceDepth; static constexpr int Length = SequenceLength; public: OrderedSequenceBarrier() = delete; OrderedSequenceBarrier(const OrderedSequenceBarrier&) = delete; OrderedSequenceBarrier(OrderedSequenceBarrier&&) = delete; OrderedSequenceBarrier& operator=(const OrderedSequenceBarrier&) = delete; OrderedSequenceBarrier& operator=(OrderedSequenceBarrier&&) = delete; ~OrderedSequenceBarrier() = default; CUTLASS_DEVICE OrderedSequenceBarrier(SharedStorage& storage, Params const& params) : params_(params), barrier_ptr_(&storage.barrier_[0][0]), // Group 0 - starts with an opposite phase stage_({0, params.group_id == 0, 0}) { int warp_idx = canonical_warp_idx(); int lane_predicate = cute::elect_one_sync(); // Barrier FULL, EMPTY init // Init is done only by the one elected thread of the block if (warp_idx == 0 && lane_predicate == 1) { for (int d = 0; d < Depth; ++d) { for (int l = 0; l < Length; ++l) { barrier_ptr_[d * Length + l].init(params.group_size); } } } cutlass::arch::fence_barrier_init(); } // Wait on a stage to be unlocked CUTLASS_DEVICE void wait() { get_barrier_for_current_stage(params_.group_id).wait(stage_.phase()); } // Signal completion of Stage and move to the next stage // (group_id) signals to (group_id+1) CUTLASS_DEVICE void arrive() { int signalling_id = (params_.group_id + 1) % Length; get_barrier_for_current_stage(signalling_id).arrive(); ++stage_; } CUTLASS_DEVICE void advance() { ++stage_; } private: CUTLASS_DEVICE Barrier& get_barrier_for_current_stage(int group_id) { return barrier_ptr_[stage_.index() * Length + group_id]; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // end namespace cutlass
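To make the producer/consumer protocol above concrete, here is a minimal sketch of a kernel fragment that drives `PipelineAsync` with one producer warp and one consumer warp. It assumes this header is included; `fill_buffer` and `use_buffer` are hypothetical stand-ins for the actual shared-memory work. This is a sketch of the usage pattern, not code from this file.

#include "cutlass/pipeline/sm90_pipeline.hpp"

template <int kStages>
__device__ void pipeline_async_sketch(
    typename cutlass::PipelineAsync<kStages>::SharedStorage &storage,
    int iterations) {

  using Pipeline = cutlass::PipelineAsync<kStages>;

  // One full warp produces, one full warp consumes (warps 0 and 1 of the block).
  int warp_id = threadIdx.x / 32;
  bool is_producer = (warp_id == 0);

  typename Pipeline::Params params;
  params.role = is_producer ? Pipeline::ThreadCategory::Producer
              : (warp_id == 1) ? Pipeline::ThreadCategory::Consumer
                               : Pipeline::ThreadCategory::NonParticipant;
  params.producer_arv_count = 32;   // all 32 producer lanes arrive on the full barrier
  params.consumer_arv_count = 32;   // all 32 consumer lanes arrive on the empty barrier
  Pipeline pipeline(storage, params);
  __syncthreads();                  // barriers must be initialized before first use

  if (is_producer) {
    // Producer starts with the opposite phase: all buffers begin empty.
    auto state = cutlass::make_producer_start_state<Pipeline>();
    for (int i = 0; i < iterations; ++i) {
      pipeline.producer_acquire(state);   // wait for stage state.index() to be free
      // fill_buffer(state.index());      // hypothetical: write the stage's buffer
      pipeline.producer_commit(state);    // arrive on the stage's full barrier
      ++state;
    }
    pipeline.producer_tail(state);        // drain all stages before the kernel exits
  }
  else if (warp_id == 1) {
    typename Pipeline::PipelineState state;  // index 0, phase 0
    for (int i = 0; i < iterations; ++i) {
      pipeline.consumer_wait(state);      // wait for stage state.index() to be filled
      // use_buffer(state.index());       // hypothetical: read the stage's buffer
      pipeline.consumer_release(state);   // arrive on the stage's empty barrier
      ++state;
    }
  }
}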
cutlass/include/cutlass/pipeline/sm90_pipeline.hpp/0
{ "file_path": "cutlass/include/cutlass/pipeline/sm90_pipeline.hpp", "repo_id": "cutlass", "token_count": 12793 }
46
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Ell iterator for matrix of indices (ellColInd matrix) */ #pragma once namespace cutlass { namespace transform { namespace threadblock { namespace ell{ constexpr unsigned int SmemPow = 8; constexpr unsigned int SmemStages = 2; constexpr unsigned int SmemSize = 1 << SmemPow; constexpr unsigned int SmemMask = (SmemSize*SmemStages-1); class SharedStorage{ public: Array<int, SmemSize*SmemStages> array; }; class Iterator{ public: using Layout = layout::PitchLinear; using LongIndex = typename Layout::LongIndex; private: const int *gmem_col_idx_; int *smem_col_idx_; const int block_size_; const int base_idx_; const int k_shape_; const int ell_increment_; const int array_length_; int col_idx_base_; int residue_; int counter_; int pow2_; int residue_shape_; int smem_offset_; int smem_stage_; int gmem_offset_; int lane_; bool is_pow2_; bool is_residue_tile_; public: CUTLASS_DEVICE void load_ell_indices(){ for(int i=threadIdx.x; i<SmemSize; i+=blockDim.x){ int idx = (gmem_offset_+i < array_length_) ? gmem_offset_+i : array_length_-1; int gmem_col_idx = gmem_col_idx_[idx] - base_idx_; smem_col_idx_[i + smem_stage_ * SmemSize] = (gmem_col_idx >= 0) ? 
gmem_col_idx : -1; } gmem_offset_ += SmemSize; smem_stage_ ^= 1; } CUTLASS_DEVICE Iterator( SharedStorage& shared_storage_base, const int* col_idx, const int& block_size, const int& base_idx, const int k_shape, const int& problem_size_k, const int& ell_stride, const int& thread_idx) : residue_(0), counter_(0), smem_offset_(0), smem_stage_(0), gmem_offset_(0), block_size_(block_size), base_idx_(base_idx), k_shape_(k_shape), ell_increment_(ell_stride * block_size), array_length_((problem_size_k + block_size_ - 1) / block_size_), residue_shape_(problem_size_k % k_shape_), is_residue_tile_(residue_shape_ != 0), smem_col_idx_(reinterpret_cast<int*>(&shared_storage_base.array)), gmem_col_idx_(const_cast<int*>(col_idx)), lane_(thread_idx % 32) { load_ell_indices(); __syncthreads(); is_pow2_ = ((block_size_ & (block_size_ - 1)) == 0); if( is_pow2_ && k_shape <= block_size_ ) lane_ = 0; col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_; pow2_ = 0; while(block_size_ >> (pow2_ + 1)) ++pow2_; } CUTLASS_DEVICE int get_blocksize(){ return block_size_; } CUTLASS_DEVICE Iterator &operator++(){ if(is_residue_tile_){ residue_ += residue_shape_; is_residue_tile_ = false; } else { residue_ += k_shape_; } if(residue_ < block_size_){ return *this; } if((array_length_ > SmemSize) && (((smem_offset_ >> SmemPow) & 1) != smem_stage_)) load_ell_indices(); if(residue_ == block_size_){ ++smem_offset_; counter_ += ell_increment_; residue_ = 0; col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_; return *this; } if(is_pow2_){ smem_offset_ += residue_ >> pow2_; counter_ += (residue_ >> pow2_) * ell_increment_; residue_ = residue_ & ((1 << pow2_) - 1); } else { smem_offset_ += residue_ / block_size_; counter_ += (residue_ / block_size_) * ell_increment_; residue_ %= block_size_; } col_idx_base_ = smem_col_idx_[(smem_offset_ + lane_) & SmemMask] * ell_increment_ - counter_; return *this; } CUTLASS_DEVICE LongIndex get_offset(const int& idx) { int num_jump_tiles; if(is_pow2_) num_jump_tiles = (idx + residue_) >> pow2_; else num_jump_tiles = (idx + residue_) / block_size_; int tmp = __shfl_sync(0xffffffff, col_idx_base_, num_jump_tiles); return tmp - num_jump_tiles * ell_increment_; } CUTLASS_DEVICE LongIndex get_offset_fast() { return col_idx_base_; } }; } } } }
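A rough sketch of how this index iterator is driven by a Blocked-Ell mainloop is given below. It is illustrative only and makes several assumptions: `ell_col_idx`, `ell_block_size`, `base_idx`, `kTileK`, `problem_size_k`, `ell_stride`, `k_tile_count`, and the repositioning of the dense-operand tile iterator are placeholders for what the surrounding sparse GEMM kernel would provide; the real interleaving with the dense iterators is kernel-specific.

// Hypothetical threadblock-level usage of ell::Iterator.
__device__ void blocked_ell_sketch(
    const int *ell_col_idx, int ell_block_size, int base_idx,
    int problem_size_k, int ell_stride, int k_tile_count) {

  constexpr int kTileK = 32;   // threadblock tile extent along K (assumed)

  __shared__ cutlass::transform::threadblock::ell::SharedStorage ell_storage;

  cutlass::transform::threadblock::ell::Iterator ell_iter(
      ell_storage,       // shared memory staging a window of ellColInd entries
      ell_col_idx,       // pointer to the ellColInd index matrix
      ell_block_size,    // ELL block size
      base_idx,          // index subtracted from each stored column index
      kTileK,            // threadblock tile extent along K
      problem_size_k,    // GEMM K extent
      ell_stride,        // stride used to turn block indices into element offsets
      threadIdx.x);

  for (int k_tile = 0; k_tile < k_tile_count; ++k_tile) {
    // get_offset() broadcasts the cached block-column index across the warp and
    // converts it into an element offset into the dense operand.
    auto offset = ell_iter.get_offset(/* k index within the tile = */ 0);
    // dense_tile_iterator.add_pointer_offset(offset);  // hypothetical repositioning
    (void)offset;
    ++ell_iter;   // advance to the next K tile of indices
  }
}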
cutlass/include/cutlass/transform/threadblock/ell_iterator.h/0
{ "file_path": "cutlass/include/cutlass/transform/threadblock/ell_iterator.h", "repo_id": "cutlass", "token_count": 2651 }
47
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing computing the addresses of storing of tiles from pitch-linear rank=2 tensors. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_coord.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// template <typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, bool Dynamic_iterations = false, int Alignment = sizeof_bits<Element>::value* ThreadMap::kElementsPerAccess / 8 > class RegularTileAccessIteratorDirectConv; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations OFF /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment> class RegularTileAccessIteratorDirectConv< Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, false, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Element type per access using AccessType = Array<Element, ThreadMap::kElementsPerAccess>; private: // // Data members // /// Stride value StrideIndex stride_; /// Internal pointer to first access of tile AccessType *pointer_; /// Internal byte offset Index byte_offset_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : stride_(ref.stride(0) / ThreadMap::kElementsPerAccess), byte_offset_(0) { layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); // initialize pointer pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base)); set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_num(int num) { //Do nothing } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_offset_ += pointer_offset * sizeof(Element); } /// Returns a pointer CUTLASS_DEVICE AccessType *get() const { AccessType *access_ptr = pointer_; int access_offset = iteration_strided_ * 
ThreadMap::Delta::kStrided * stride_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess; char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset); return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) return *this; // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. iteration_strided_ = 0; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv operator++(int) { RegularTileAccessIteratorDirectConv prev(*this); this->operator++(); return prev; } /// Adds a tile offset in the unit of tile. CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { add_pointer_offset(coord.contiguous() * Shape::kContiguous + coord.strided() * ThreadMap::Iterations::kStrided * ThreadMap::Delta::kStrided * stride_ * ThreadMap::kElementsPerAccess); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for congruous arrangements for TensorOps with dynamic_iterations ON /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment> class RegularTileAccessIteratorDirectConv< Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_,true, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Element type per access using AccessType = Array<Element, ThreadMap::kElementsPerAccess>; private: // // Data members // /// Stride value StrideIndex stride_; /// Internal pointer to first access of tile AccessType *pointer_; /// Internal byte offset Index byte_offset_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; /// Total iterattions in the strided dimension: Dynamic value int total_iteration_strided_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : stride_(ref.stride(0) / ThreadMap::kElementsPerAccess), byte_offset_(0) { layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); // initialize pointer pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_base)); set_iteration_index(0); } /// Overrides the 
internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_num(int num) { total_iteration_strided_ = num; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_offset_ += pointer_offset * sizeof(Element); } /// Returns a pointer CUTLASS_DEVICE AccessType *get() const { AccessType *access_ptr = pointer_; int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess; char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset); return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) return *this; // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < total_iteration_strided_) { return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. iteration_strided_ = 0; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv operator++(int) { RegularTileAccessIteratorDirectConv prev(*this); this->operator++(); return prev; } /// Adds a tile offset in the unit of tile. CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { add_pointer_offset(coord.contiguous() * Shape::kContiguous + coord.strided() * total_iteration_strided_ * ThreadMap::Delta::kStrided * stride_ * ThreadMap::kElementsPerAccess); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for column major layouts /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,bool Dynamic_iterations, int Alignment > class RegularTileAccessIteratorDirectConv< Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, Dynamic_iterations , Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileAccessIteratorDirectConv< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap_, Dynamic_iterations>; using AccessType = typename UnderlyingIterator::AccessType; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_num(int num) { iterator_.set_iteration_num(num); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv operator++(int) { RegularTileAccessIteratorDirectConv prev(*this); ++iterator_; return prev; } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for row major layouts /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_,bool Dynamic_iterations, int Alignment> class RegularTileAccessIteratorDirectConv< Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, Dynamic_iterations, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileAccessIteratorDirectConv< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap_, Dynamic_iterations>; using AccessType = typename UnderlyingIterator::AccessType; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_num(int num) { iterator_.set_iteration_num(num); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIteratorDirectConv operator++(int) { RegularTileAccessIteratorDirectConv prev(*this); ++iterator_; return prev; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
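The access pattern implemented by `get()` and `operator++` above can be hard to see through the template machinery. The following standalone Python sketch (illustration only, not CUTLASS code; every tile parameter is a made-up value) mimics the offset arithmetic: the contiguous iteration index wraps first, the strided index wraps second, and in the dynamic-iteration specialization the strided trip count is whatever `set_iteration_num` supplied at run time instead of the compile-time `ThreadMap::Iterations::kStrided`.

```python
# Illustration only: mimic RegularTileAccessIteratorDirectConv's traversal order.
# The constants stand in for ThreadMap parameters and are arbitrary example values.
ITERATIONS_CONTIGUOUS = 4   # ThreadMap::Iterations::kContiguous
DELTA_CONTIGUOUS = 8        # ThreadMap::Delta::kContiguous
DELTA_STRIDED = 2           # ThreadMap::Delta::kStrided
ELEMENTS_PER_ACCESS = 8     # ThreadMap::kElementsPerAccess
STRIDE = 16                 # stride_, already divided by kElementsPerAccess

def access_offset(iteration_contiguous, iteration_strided):
    # Mirrors get(): offset of the current access in units of AccessType
    return (iteration_strided * DELTA_STRIDED * STRIDE
            + iteration_contiguous * DELTA_CONTIGUOUS // ELEMENTS_PER_ACCESS)

def traverse(total_iterations_strided):
    # Mirrors repeated operator++: the contiguous index wraps first, then strided
    offsets = []
    for iteration_strided in range(total_iterations_strided):
        for iteration_contiguous in range(ITERATIONS_CONTIGUOUS):
            offsets.append(access_offset(iteration_contiguous, iteration_strided))
    return offsets

# Static specialization: the strided trip count is ThreadMap::Iterations::kStrided.
print(traverse(total_iterations_strided=3))
# Dynamic specialization: the trip count comes from set_iteration_num() at run time.
print(traverse(total_iterations_strided=5))
```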
cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h/0
{ "file_path": "cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h", "repo_id": "cutlass", "token_count": 6122 }
48
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Code Organization") [README](../../README.md#documentation) > **Code Organization** # CUTLASS Code Organization This document describes the layout of the CUTLASS repository. The main components are: * **CUTLASS Template Library** - CUDA Templates for Linear Algebra Subroutines and Solvers (header only) * **CuTe Template Library** - CUTLASS's core vocabulary layout type and associated algebra (header only) * **CUTLASS Utilities** - Additional templates * **CUTLASS Instance Library** - instantiations of CUTLASS templates covering the design space * **CUTLASS Profiler** - CUTLASS Library, Profiler, and Utilities * **Examples** - SDK examples of CUTLASS Template Library and components * **Media** - supporting documentation and media content * **Tests** - test components for CUTLASS Template Library and tools ## CUTLASS Template Library CUDA Templates for Linear Algebra Subroutines and Solvers is a library of CUDA C++ template classes for performing efficient matrix computations on NVIDIA GPUs. Like NVIDIA CUB, the components of CUTLASS are organized hierarchically based on the scope of cooperative elements. For example, warp-level GEMM components perform a matrix multiply collectively by the set of threads within a warp. The following figure illustrates each layer. Components are designed to be usable by client applications accessing functionailty at each scope. CUTLASS Templates are implemented by header files in the following directory structure: ``` include/ # Top-level include directory. Client applications should target this path. cutlass/ # CUDA Templates for Linear Algebra Subroutines and Solvers - headers only arch/ # direct exposure of architecture features (including instruction-level GEMMs) * gemm/ # code specialized for general matrix product computations thread/ # thread-level operators warp/ # warp-level operators collective/ # 3.x API operators for all threads a tiled mma/copy are built over threadblock/ # CTA-level operators kernel/ # CUDA kernel entry points device/ # launches kernel(s) over a full device * # scope-agnostic components and basic vocabulary type definitions for GEMM layout/ # layout definitions for matrices, tensors, and other mathematical objects in memory * reduction/ # bandwidth-limited reduction kernels that do not fit the "gemm" models thread/ # thread-level operators warp/ # warp-level operators threadblock/ # CTA-level operators kernel/ # CUDA kernel entry points device/ # launches kernel(s) over a full device * # scope-agnostic components and basic vocabulary type definitions transform/ # code specialized for layout, type, and domain transformations thread/ # thread-level operators warp/ # warp-level operators threadblock/ # CTA-level operators kernel/ # CUDA kernel entry points device/ # launches kernel(s) over a full device * # scope-agnostic components and basic vocabulary type definitions util/ # miscellaneous CUTLASS components * * # core vocabulary types and fundamental arithmetic operators cute / # CuTe Layout, layout algebra, MMA/Copy atoms, tiled MMA/Copy algorithm/ # Definitions of core operations such as copy, gemm, and operations on cute::tuples arch/ # Bare bones PTX wrapper structs for copy and math instructions atom/ # Meta-information either link to or built from arch/ operators mma_atom.hpp # cute::Mma_Atom and cute::TiledMma copy_atom.hpp # cute::Copy_Atom and cute::TiledCopy *sm*.hpp # Arch specific meta-information for copy and math operations container/ # Core 
container types used across CuTe, namely, cute::tuple numeric/ # CuTe's internal numerics implementation * # Core library types such as Shape, Stride, Layout, Tensor, and associated operations ``` See [Programming Guidelines](/media/docs/programming_guidelines.md) for further details about conventions and design patterns used throughout CUTLASS. ## CuTe CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly packages the type, shape, memory space, and layout of data, while performing the complicated indexing for the user. This lets programmers focus on the logical descriptions of their algorithms while CuTe does the mechanical bookkeeping for them. With these tools, we can quickly design, implement, and modify all dense linear algebra operations. More documentation for CuTe can be found in [`/media/docs/cute/`](/media/docs/cute/). ## Tools The `tools/` directory contains clients of the CUTLASS Template library and includes the following. ## CUTLASS Instance Library The CUTLASS Instance Library contains instantiations of the above CUTLASS templates covering supported configurations, data types, block structure, and tile sizes. These instantiations are procedurally generated using a set of scripts to span the design space. ``` tools/ library/ # static/dynamic library containing all kernel instantiations of interest # (with some build-level filter switches to compile specific subsets) include/ cutlass/ library/ # header files for CUTLASS Deliverables Library (in cutlass::library:: namespace) handle.h # implements a host-side API for launching kernels, similar to cuBLAS library.h # defines enums and structs to describe the tiled structure of operator instances manifest.h # collection of all instances src/ python/ cutlass_library/ # scripts to procedurally generate CUTLASS template instances gemm_operations.py library.py generator.py # entry point of procedural generation scripts - invoked by cmake manifest.py ``` When CMake is executed, the CUTLASS Instance Library generator scripts are executed to construct a set of instantiations in `build/tools/library/generated/`. ### CUTLASS Profiler The CUTLASS Profiler is designed to load the CUTLASS Instance Library and execute all operations contained therein. This command-line driven application constructs an execution environment for evaluating functionality and performance. It is implemented in ``` tools/ profiler/ ``` and may be built as follows. ``` $ make cutlass_profiler -j ``` [Further details about the CUTLASS Profiler are described here.](/media/docs/profiler.md) ### CUTLASS Utilities `tools/util/` defines a companion library of headers and sources that support the CUTLASS test programs, examples, and other client applications. Its structure is as follows: ``` tools/ util/ include/ cutlass/ util/ # CUTLASS Utility companion library reference/ # functional reference implementation of CUTLASS operators # (minimal consideration for performance) detail/ * device/ # device-side reference implementations of CUTLASS operators thread/ kernel/ * host/ # host-side reference implementations of CUTLASS operators * * ``` [More details about CUTLASS Utilities may be found here.](/media/docs/utilities.md) ## Examples To demonstrate CUTLASS components, several SDK examples are implemented in `examples/`. CUTLASS SDK examples apply CUTLASS templates to implement basic computations. 
``` examples/ 00_basic_gemm/ # launches a basic GEMM with single precision inputs and outputs 01_cutlass_utilities/ # demonstrates CUTLASS Utilities for allocating and initializing tensors 02_dump_reg_smem/ # debugging utilities for printing register and shared memory contents 03_visualize_layout/ # utility for visualizing all layout functions in CUTLASS 04_tile_iterator/ # example demonstrating an iterator over tiles in memory 05_batched_gemm/ # example demonstrating CUTLASS's batched strided GEMM operation 06_splitK_gemm/ # exmaple demonstrating CUTLASS's Split-K parallel reduction kernel 07_volta_tensorop_gemm/ # example demonstrating mixed precision GEMM using Volta Tensor Cores 08_turing_tensorop_gemm/ # example demonstrating integer GEMM using Turing Tensor Cores 10_planar_complex/ # example demonstrating planar complex GEMM kernels 11_planar_complex_array/ # example demonstrating planar complex kernels with batch-specific problem sizes 12_gemm_bias_relu/ # example demonstrating GEMM fused with bias and relu activation function 13_fused_two_gemms/ # example demonstrating two GEMMs fused into one kernel ``` ## Media This directory contains documentation, images, and performance result data which accompanies the CUTLASS library and components. ## Tests Test programs for CUTLASS. Tests are organized hierarchically, mirroring the organization of source files. ``` test/ # unit tests for CUTLASS Template Library unit/ arch/ core/ gemm/ device/ kernel/ thread/ threadblock/ warp/ reduction/ kernel/ thread/ transform/ threadblock/ * ``` Tests can be built and run at the top level scope by invoking `make test_unit` or by building and explicitly executing each individual target, e.g. `cutlass_test_unit_gemm_device`. Tests are configured to specify appropriate GTest filter strings to avoid running except on architectures where they are expected to pass. Thus, no tests should fail. The actual number of tests run may vary over time as more are added. # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
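The SDK examples listed above are C++; the `examples/python` directory carries their Python-interface counterparts. As a rough sketch of what the most basic of these looks like (assuming the high-level `cutlass.op.Gemm` API demonstrated in those notebooks; shapes and dtypes are illustrative only):

```python
# Minimal GEMM through the CUTLASS Python interface (see examples/python).
# Assumes the high-level cutlass.op.Gemm API; shapes and dtypes are illustrative.
import numpy as np
import cutlass

# Computes D = alpha * (A @ B) + beta * C using the plan's default scalars
A, B, C, D = [np.ones((1024, 1024), dtype=np.float16) for _ in range(4)]

plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
plan.run(A, B, C, D)
```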
cutlass/media/docs/code_organization.md/0
{ "file_path": "cutlass/media/docs/code_organization.md", "repo_id": "cutlass", "token_count": 4180 }
49
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS GEMM API") [README](../../README.md#documentation) > **CUTLASS GEMM API** # CUTLASS GEMM API CUTLASS presents a uniform programming model for matrix multiply-accumulate operations at each level of the hierarchy. This document focuses on device-level, threadblock-level GEMMs, warp-level GEMMs, thread-level GEMMs, and instruction-level GEMMs. # CUTLASS GEMM Model CUTLASS implements the basic GEMM triple loop nest with a tiled structure mirroring the execution model hierarchy. The following pseudocode describes the model for a GEMM kernel targeting a warp-synchronous matrix multiply instruction like mma.sync. The entire operation is referred to as "Gemm," as it is assumed that an epilogue operation performs the general matrix update similar to BLAS. ```c++ // cutlass::gemm::device::Gemm // for (int cta_n = 0; cta_n < GemmN; cta_n += CtaTileN) { // for each CTA } CTA-level concurrency for (int cta_m = 0; cta_m < GemmM; cta_m += CtaTileM) { // for each CTA } // // cutlass::gemm::threadblock::Mma // for (int cta_k = 0; cta_k < GemmK; cta_k += CtaTileK) { // "GEMM mainloop" - no unrolling - one iteration of this loop is one "stage" // for (int warp_n = 0; warp_n < CtaTileN; warp_n += WarpTileN) { // for each warp } warp-level concurrency for (int warp_m = 0; warp_m < CtaTileM; warp_m += WarpTileM) { // for each warp } // for (int warp_k = 0; warp_k < CtaTileK; warp_k += WarpTileK) { // fully unroll across CtaTileK - one iteration of this loop is one "k Group" // for (int mma_k = 0; mma_k < WarpTileK; mma_k += MmaK) { // cutlass::gemm::warp::Mma for (int mma_n = 0; mma_n < WarpTileN; mma_n += MmaN) { // for (int mma_m = 0; mma_m < WarpTileM; mma_m += MmaM) { // // mma_instruction(d, a, b, c); // cutlass::arch::mma - warp-wide matrix multiply instruction } // for mma_m } // for mma_n } // for mma_k } // for warp_k } // for warp_m } // for warp_n } // for cta_k } // for cta_m } // for cta_n ``` The outer-most loops correspond to CTA-level hardware concurrency and are not explicitly written as loops in the code. These are implied by CUDA grid launch semantics. The comment `cutlass::gemm::threadblock::Mma` refers to the threadblock-scoped matrix multiply-accumulate concept. This is the computation performed by one threadblock to compute a matrix product in registers. The "GEMM main loop" is listed. The comment `cutlass::gemm::warp::Mma` refers to the computation performed by each warp. This is a nested loop executing a sequence of accumulated outer products. The inner-most operation corresponds directly to hardware support. In this example, the nested structure terminates with warp-synchronous matrix multiply instructions targeting Tensor Cores. Alternatively, GEMMs targeting single-thread instructions may have an additional series of nested loops corresponding to thread-level concurrency. # CUTLASS GEMM Components This loop nest is expressed in CUTLASS via the following components which are specialized for data type, layout, and math instruction. ![ALT](/media/images/cutlass-gemm-components.png "CUTLASS GEMM Components") These components are described in the following sections. ## Device-wide GEMM API The device-level GEMM API is intended to streamline instantiation and execution of the standard GEMM computation across the GPU. This operator is intended to be used in host-side .cu code and has semantics similar to cuBLAS. 
The device-wide GEMM API is embodied by the following operators: - [cutlass::gemm::device::Gemm](/include/cutlass/gemm/device/gemm.h) - basic GEMM operation - [cutlass::gemm::device::GemmArray](/include/cutlass/gemm/device/gemm_array.h) - batched GEMM operation in which input matrices are read from arrays of pointers - [cutlass::gemm::device::GemmBatched](/include/cutlass/gemm/device/gemm_batched.h) - batched GEMM operation in which input matrices are separated by a constant stride - [cutlass::gemm::device::GemmSplitKParallel](/include/cutlass/gemm/device/gemm_splitk_parallel.h) - GEMM operation that partitions the GEMM K dimension then launches a separate reduction kernel **Example:** launch a mixed-precision GEMM targeting Volta Tensor Cores. ```c++ using Gemm = cutlass::gemm::device::Gemm< cutlass::half_t, // ElementA cutlass::layout::ColumnMajor, // LayoutA cutlass::half_t, // ElementB cutlass::layout::ColumnMajor, // LayoutB cutlass::half_t, // ElementOutput cutlass::layout::ColumnMajor, // LayoutOutput float, // ElementAccumulator cutlass::arch::OpClassTensorOp, // tag indicating Tensor Cores cutlass::arch::Sm70 // tag indicating target GPU compute architecture >; Gemm gemm_op; cutlass::Status status; // // Launch GEMM on the device // status = gemm_op({ {m, n, k}, {ptrA, lda}, {ptrB, ldb}, {ptrC, ldc}, {ptrD, ldd}, {alpha, beta} }); if (status != cutlass::Status::kSuccess) { return -1; } ``` ## Threadblock-level GEMM API GEMMs at this scope are expected to efficiently load tiles of data from global memory into internal storage and then compute matrix products with warp-level GEMM operators. The threadblock-scoped matrix multiply operation is embodied by [cutlass::gemm::threadblock::MmaPipelined](/include/cutlass/gemm/threadblock/mma_pipelined.h). This is a class inspired by [std::transform_reduce()](https://en.cppreference.com/w/cpp/algorithm/transform_reduce) which computes the accumulated matrix product of a range of tiles defined by tile iterators. ![ALT](/media/images/cutlass-threadblock-mma-pipelined.png "cutlass::gemm::threadblock::MmaPipelined") In the case of GEMM, the tile iterators are [cutlass::transform::threadblock::PredicatedTileIterator](/include/cutlass/transform/threadblock/predicated_tile_iterator.h) to traverse a sequence of tiles in global memory with appropriate predication to avoid out-of-bounds memory accesses. *Concept.* Threadblock-level matrix multiply accumulate operators are function objects satisfying the following concept. 
```c++
struct Mma {
  /// Shape of warp-level matrix operation (concept: GemmShape)
  struct Shape;

  /// Data type of multiplicand A (concept: numeric type)
  struct ElementA;

  /// Layout of multiplicand A (concept: Layout)
  struct LayoutA;

  /// Data type of multiplicand B (concept: numeric type)
  struct ElementB;

  /// Layout of multiplicand B (concept: Layout)
  struct LayoutB;

  /// Data type of accumulator matrix C (concept: numeric type)
  struct ElementC;

  /// Layout of accumulator matrix C (concept: Layout)
  struct LayoutC;

  /// Iterator of A operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept
  struct IteratorA;

  /// Fragment object loaded from IteratorA (concept: Array<ElementA, ..>)
  struct FragmentA;

  /// Iterator of B operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept
  struct IteratorB;

  /// Fragment object loaded from IteratorB (concept: Array<ElementB, ..>)
  struct FragmentB;

  /// Iterator of C operand in shared memory -
  /// satisfies: ReadableRandomAccessTileIteratorConcept | WriteableRandomAccessTileIteratorConcept
  struct IteratorC;

  /// Fragment object loaded from IteratorC (concept: Array<ElementC, ..>)
  struct FragmentC;

  /// Warp-level matrix multiply operator (concept: satisfies gemm::warp::Mma)
  struct Operator;

  //
  // Methods
  //

  /// Computes a matrix product accumulated in D
  CUTLASS_DEVICE
  void operator()(
    FragmentC &D,
    IteratorA iter_A,
    IteratorB iter_B,
    FragmentC const &C);
};
```

## Warp-level Matrix Multiply API

Warp-level GEMM operators load tiles from shared memory into registers and then compute matrix multiplies using either Tensor Cores or CUDA Cores. The result is accumulated in a register tile. Iterators are defined for each operand `A`, `B`, and `C`.

The warp-level GEMM API is a generalization of CUDA's WMMA API to achieve the following objectives:

- native matrix multiply sizes of Tensor Cores
- permuted shared memory layouts to ensure conflict-free accesses
- pointer initialization outside of the mainloop
- efficient traversal

Defining a warp-level matrix multiply in CUTLASS is similar to WMMA as shown below.

![ALT](/media/images/cutlass-warp-level-gemm-api-instantiation.png "CUTLASS vs WMMA API")

The usage model is also similar. The following example computes a warp-level GEMM operation, accumulating a series of matrix products in a register-backed array. The input to a warp-level GEMM operation in CUTLASS _must_ be data in shared memory loaded by iterators or on register-backed fragments.
![ALT](/media/images/cutlass-warp-level-gemm-operation.png "CUTLASS warp-level GEMM API") ```c++ #include "cutlass/gemm/warp/default_mma_tensor_op.h" using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using WarpMma = typename cutlass::gemm::warp::DefaultMmaTensorOp< cutlass::gemm::GemmShape<64, 64, 8>, // Overall warp-level GEMM operation cutlass::gemm::GemmShape<16, 8, 8>, // Target instruction cutlass::half_t, LayoutA, // operand A type and layout cutlass::half_t, LayoutB, // operand B type and layout float, // accumulator type cutlass::layout::RowMajor>::Type; // accumulator layout // // Define a GEMM operation loading data from shared memory // int const kGemmK = 32; __shared__ ElementA smem_buffer_A[WarpMma::Shape::kM * kGemmK]; __shared__ ElementB smem_buffer_B[WarpMma::Shape::kN * kGemmK]; // // Construct iterators into SMEM tiles // // leading dimensions inferred from matrix problem size int lda = WarpMma::Shape::kM; int ldb = WarpMma::Shape::kN; // iterators into shared memory WarpMma::IteratorA warp_iterator_A({smem_buffer_A, lda}); WarpMma::IteratorB warp_iterator_B({smem_buffer_B, ldb}); // Fragments in registers storing the operands FragmentA frag_A; FragmentB frag_B; FragmentC accum; WarpMma mma; accum.clear(); // // Accumulated outer product // #pragma unroll 1 for (int k = 0; k < kGemmK; k += WarpMma::Shape::kK) { iter_A.load(frag_A); // Load fragments from A and B matrices iter_B.load(frag_B); ++iter_A; ++iter_B; // Advance along GEMM K to next tile in A // and B matrices // Compute matrix product mma(accum, frag_A, frag_B, accum); } ``` *Concept.* Warp-level Mma operations are function objects satisfying the following concept. ```c++ struct Mma { /// Shape of warp-level matrix operation (concept: GemmShape) struct Shape; /// Data type of multiplicand A (concept: numeric type) struct ElementA; /// Layout of multiplicand A (concept: Layout) struct LayoutA; /// Data type of multiplicand B (concept: numeric type) struct ElementB; /// Layout of multiplicand B (concept: Layout) struct LayoutB; /// Data type of accumulator matrix C (concept: numeric type) struct ElementC; /// Layout of accumulator matrix C (concept: Layout) struct LayoutC; /// Iterator of A operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept struct IteratorA; /// Fragment object loaded from IteratorA (concept: Array<ElementA, ..>) struct FragmentA; /// Iterator of B operand in shared memory - satisfies: ReadableRandomAccessTileIteratorConcept struct IteratorB; /// Fragment object loaded from IteratorB (concept: Array<ElementB, ..>) struct FragmentB; /// Iterator of C operand in shared memory - /// satisfies: ReadableRandomAccessTileIteratorConcept | WriteableRandomAccessTileIteratorConcept struct IteratorC; /// Fragment object loaded from IteratorC (concept: Array<ElementC, ..>) struct FragmentC; /// Indicates class of matrix operator (arch::OpClassSimt or arch::OpClassTensorOp) struct OperatorClass; // // Methods // /// Computes a matrix multiply-accumulate CUTLASS_DEVICE void operator()( FragmentC &D, IteratorA A, IteratorB B, FragmentC const &C); }; ``` *Tensor Core Operators.* Warp-level matrix multiply operators targeting Tensor Cores may be defined with the following template arguments. 
The `Policy` type specifies implementation-level details which may be used to affect performance or internal implementation of the warp-level operator. ```c++ namespace cutlass { namespace gemm { namespace warp { /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Used for partial specialization typename Enable = bool > class MmaTensorOp {} } // namespace warp } // namespace gemm } // namespace cutlass ``` *SIMT Math Instructions.* Warp-level matrix multiply operators targeting CUDA Cores may be defined with the following template arguments. The `Policy` type specifies implementation-level details which may be used to affect performance or internal implementation of the warp-level operator. ```c++ /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Used for partial specialization typename Enable = bool > class MmaSimt; ``` ## Thread-level GEMM API Thread-level GEMM operations perform matrix multiply-accumulate on data held in registers. These target CUDA Cores exclusively. *Concept.* Thread-level matrix multiply operations are function objects satisfying the following concept. ```c++ struct Mma { /// Shape of warp-level matrix operation (concept: GemmShape) struct Shape; /// Data type of multiplicand A (concept: numeric type) struct ElementA; /// Layout of multiplicand A (concept: Layout) struct LayoutA; /// Fragment object loaded from IteratorA (concept: Array<ElementA, ..>) struct FragmentA; /// Data type of multiplicand B (concept: numeric type) struct ElementB; /// Layout of multiplicand B (concept: Layout) struct LayoutB; /// Fragment object loaded from IteratorA (concept: Array<ElementB, ..>) struct FragmentB; /// Data type of accumulator matrix C (concept: numeric type) struct ElementC; /// Layout of accumulator matrix C (concept: Layout) struct LayoutC; /// Fragment object loaded from IteratorA (concept: Array<ElementC, ..>) struct FragmentC; // // Methods // /// Computes a matrix multiply-accumulate CUTLASS_DEVICE void operator()( FragmentC &D, FragmentA const &A, FragmentB const &B, FragmentC const &C); }; ``` The CUTLASS thread-level GEMM template accepts the following template arguments. 
```c++ namespace cutlass { namespace gemm { namespace thread { /// Structure to compute the matrix product template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape, /// Data type of A elements typename ElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Data type of B elements typename ElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Element type of C matrix typename ElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Concept: arch::OpMultiplyAdd or arch::Mma<> typename Operator = arch::OpMultiplyAdd, /// Used for partial specialization typename Enable = bool > struct Mma; } // namespace thread } // namespace gemm } // namespace cutlass ``` ## Efficient Epilogue CUTLASS GEMM operators perform mma followed by epilogue operation similar to cuBLAS. CUTLASS implements an efficient row-major epilogue. Thus, to achieve column-major GEMM, operands A & B are transposed and swapped. To enable efficient row-major epilogue for both row-major and column-major output layout, CUTLASS' device-level GEMM operators `cutlass::device::Gemm` and `cutlass::device::GemmUniversal` provide two template definitions: - (a) [General definition](/include/cutlass/gemm/device/gemm.h#L217) - (b) [Specialized definition for column-major source/output](/include/cutlass/gemm/device/gemm.h#L545) Efficient row-major epilogue for: - (i) GEMM operator on row-major source/output uses template (a). It runs row-major GEMM and an efficient row-major epilogue. - (ii) GEMM operator on column-major source/output uses template (b). It transposes and swaps operands A and B to enable efficient epilogue. `A x B = C => Transpose(B) x Transpose(A) = Transpose(C)`. For column-major source (C) matrix, Transpose(C) is row-major, and efficient epilogue works on row-major. Note that cuBLAS typically expects a column-major source (C) and output matrix (D). Thus, CUTLASS library only instantiates and generates GEMM operatos with column-major layout. However, CUTLASS by itself can run both row-major and column-major output layouts for all combinations of input layouts. Thus, CUTLASS supports the following layout combinations for input and output layouts: - `{N,T} x {N,T} => {N,T}` - NN, TN, TN, TT GEMM for both row-major and column-major output ## Instruction-level operations CUTLASS defines a template-based interface to Tensor Core operations to avoid resorting to inline PTX. - [mma_sm70.h](/include/cutlass/arch/mma_sm70.h) - Volta TensorCore operations - [mma_sm75.h](/include/cutlass/arch/mma_sm75.h) - Turing TensorCore operations # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
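The tiled loop nest described in the *CUTLASS GEMM Model* section above can be expressed directly in NumPy. The sketch below is illustration only: CTA-, warp-, and instruction-level concurrency collapse into ordinary Python loops and the tile sizes are arbitrary, but the tiling structure and the final linear-combination epilogue follow the pseudocode.

```python
# Standalone NumPy sketch of the tiled loop nest from the "CUTLASS GEMM Model"
# section. CTA / warp / instruction levels are collapsed into plain loops;
# tile sizes are illustrative, not tuned.
import numpy as np

def tiled_gemm(A, B, C, alpha=1.0, beta=1.0, cta=(32, 32, 16), warp=(16, 16, 8)):
    M, K = A.shape
    K2, N = B.shape
    assert K == K2
    D = np.zeros((M, N), dtype=np.float32)
    CtaM, CtaN, CtaK = cta
    WarpM, WarpN, WarpK = warp
    for cta_n in range(0, N, CtaN):                      # CTA-level "concurrency"
        for cta_m in range(0, M, CtaM):
            acc = np.zeros((CtaM, CtaN), dtype=np.float32)
            for cta_k in range(0, K, CtaK):              # GEMM mainloop
                for warp_n in range(0, CtaN, WarpN):     # warp-level "concurrency"
                    for warp_m in range(0, CtaM, WarpM):
                        for warp_k in range(0, CtaK, WarpK):
                            a = A[cta_m + warp_m: cta_m + warp_m + WarpM,
                                  cta_k + warp_k: cta_k + warp_k + WarpK]
                            b = B[cta_k + warp_k: cta_k + warp_k + WarpK,
                                  cta_n + warp_n: cta_n + warp_n + WarpN]
                            acc[warp_m: warp_m + WarpM,
                                warp_n: warp_n + WarpN] += a @ b   # the "mma"
            # Epilogue: linear combination, analogous to LinearCombination
            D[cta_m: cta_m + CtaM, cta_n: cta_n + CtaN] = (
                alpha * acc + beta * C[cta_m: cta_m + CtaM, cta_n: cta_n + CtaN])
    return D

A = np.random.rand(64, 48).astype(np.float32)
B = np.random.rand(48, 96).astype(np.float32)
C = np.random.rand(64, 96).astype(np.float32)
np.testing.assert_allclose(tiled_gemm(A, B, C), A @ B + C, rtol=1e-4)
```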
cutlass/media/docs/gemm_api.md/0
{ "file_path": "cutlass/media/docs/gemm_api.md", "repo_id": "cutlass", "token_count": 7618 }
50
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Python AST frontend that parses input into DAG IR """ import ast import inspect import textwrap from cutlass_library import DataType import cutlass from cutlass.backend.evt.frontend.frontend_base import EVTFrontendBase from cutlass.backend.epilogue import relu from cutlass.backend.library import FunctionalOp class PythonASTFrontend(EVTFrontendBase, ast.NodeVisitor): def __init__(self, element_compute=DataType.f32, **kwargs): super().__init__(element_compute, **kwargs) # Flags # If this state is True, visit_Constant returns values without creating imm node self.no_imm = False self.visiting_return = False def parse(self, example_inputs): self.example_inputs = example_inputs self.source = textwrap.dedent(inspect.getsource(self.__call__)) self.ast = ast.parse(self.source) self.visit(self.ast) # # Helper functions # @staticmethod def ast_op_to_bindings(op): mapping = { ast.Add: FunctionalOp.Plus, ast.Sub: FunctionalOp.Minus, ast.Mult: FunctionalOp.Multiplies, ast.Div: FunctionalOp.Divides, "relu": relu.binding_type, "multiply_add": FunctionalOp.MultiplyAdd, "sum": (FunctionalOp.Plus, FunctionalOp.AtomicAdd), "max": (FunctionalOp.Maximum, FunctionalOp.AtomicMaximum) } return mapping[op] # # Visiting different node types # def visit_FunctionDef(self, node: ast.FunctionDef): # Visit args and register load nodes for arg in node.args.args: self.visit(arg) for expr in node.body: self.visit(expr) def visit_arg(self, node: ast.arg): # Name of the argument name = node.arg try: example_tensor = self.example_inputs[name] except: raise RuntimeError(f"Example input for {name} is not provided.") self.add_load_node(name, example_tensor) def visit_Name(self, node: ast.Name): return node.id def visit_Constant(self, node: ast.Constant): if 
self.no_imm: return node.value else: name = self.add_imm(node.value) return name def visit_Tuple(self, node: ast.Tuple): results = [] for elt in node.elts: results.append(self.visit(elt)) return tuple(results) def visit_keyword(self, node: ast.keyword): return {node.arg: self.visit(node.value)} def visit_BinOp(self, node: ast.BinOp): if self.visiting_return: raise SyntaxError("Return value cannot be an expression") lhs = self.visit(node.left) rhs = self.visit(node.right) op = self.ast_op_to_bindings(type(node.op)) name = self.add_compute_node(op) # Add edges # The edge weights are used to sort the input args self.add_edge(lhs, name, weight=0) self.add_edge(rhs, name, weight=1) return name def visit_Assign(self, node: ast.BinOp): target = self.visit(node.targets[0]) value = self.visit(node.value) # Create the assign node self.add_store_node(target) # Add edges self.add_edge(value, target) return target def visit_Call(self, node: ast.Call): if self.visiting_return: raise SyntaxError("Return value cannot be an expression") func = self.visit(node.func) args = [self.visit(arg) for arg in node.args] if func in self.layout_fns.keys(): # Parse kwargs # By default, visiting imm automatically creates a load node # However, in function call, keyword args are used to set # specific function attributes such as indices for permute # So no_imm is set to True temporarily self.no_imm = True kwargs = {} for kw in node.keywords: kwargs.update(self.visit(kw)) self.no_imm = False op = self.layout_fns[func] name = self.add_layout_node(op, kwargs) else: op = self.ast_op_to_bindings(func) name = self.add_compute_node(op) # Add edges for idx, arg in enumerate(args): self.add_edge(arg, name, weight=idx) return name def visit_Return(self, node: ast.Return): self.visiting_return = True results = self.visit(node.value) self.visiting_return = False self.return_names = results if not isinstance(results, tuple): results = (results,) for rst in results: try: example_tensor = self.example_inputs[rst] except: raise RuntimeError(f"Example input for {rst} is not provided.") self.set_store_tensor(rst, example_tensor) self.mark_output(rst)
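To see what this frontend does with a user-defined epilogue, the standalone sketch below (standard library only, with a toy node list standing in for the CUTLASS DAG IR) parses an epilogue function the same way `parse` above does — `inspect.getsource` plus `ast.parse` — and shows how arguments, assignments, and binary operators map onto load, store, and compute nodes.

```python
# Standalone sketch of the parsing step performed by PythonASTFrontend.
# A toy node list stands in for the CUTLASS DAG IR; nothing here runs on a GPU.
import ast
import inspect
import textwrap

def example_epilogue(accum, alpha, C, beta):
    # Symbolic only: the function is parsed, never called, so `relu` need not resolve
    D = relu(alpha * accum + beta * C)  # noqa: F821
    return D

class ToyVisitor(ast.NodeVisitor):
    def __init__(self):
        self.nodes = []
        self.counter = 0

    def visit_FunctionDef(self, node):
        for arg in node.args.args:                 # arguments become load nodes
            self.nodes.append(("load", arg.arg))
        self.generic_visit(node)

    def visit_Assign(self, node):
        self.nodes.append(("store", node.targets[0].id))
        self.generic_visit(node)

    def visit_BinOp(self, node):
        op_name = type(node.op).__name__           # e.g. Mult, Add (mapped to FunctionalOp in CUTLASS)
        self.nodes.append(("compute", f"{op_name}_{self.counter}"))
        self.counter += 1
        self.generic_visit(node)

source = textwrap.dedent(inspect.getsource(example_epilogue))
visitor = ToyVisitor()
visitor.visit(ast.parse(source))
print(visitor.nodes)   # load nodes for accum/alpha/C/beta, a store for D, computes for + and *
```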
cutlass/python/cutlass/backend/evt/frontend/python_ast.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/frontend/python_ast.py", "repo_id": "cutlass", "token_count": 2728 }
51
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Eliminate layout manipulation nodes """ from copy import deepcopy from cutlass.backend.evt.ir import DAGIR, LayoutNode from cutlass.backend.evt.passes.pass_manager import EVTPassBase from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation class PassLayoutManipulateElimination(EVTPassBase): """ Eliminate layout manipulation nodes """ dependencies = [PassShapeTypePropagation] def __init__(self, dag_ir: DAGIR) -> None: super().__init__(dag_ir) self.copy_cnt = 0 def call(self): self.layout_nodes_worklist = self.get_all_layout_nodes() # Run while loop utill all layout nodes are eliminated while(len(self.layout_nodes_worklist) > 0): node = self.layout_nodes_worklist.pop(0) # for node in layout_nodes: # Step 1: get the propagation direction direction = self.get_propagation_direction(node) self.visited = [] getattr(self, f"propagate_to_{direction}")(self.dag_ir.get_node_meta(node), node) # Eliminate the current node input_node = self.dag_ir.get_all_inputs(node)[0] self.dag_ir.replace_all_uses_with(node, input_node) # layout_nodes = self.get_all_layout_nodes() def get_all_layout_nodes(self): layout_nodes = [] for node_meta in reversed(self.dag_ir.node_metas_topological_order()): if isinstance(node_meta, LayoutNode): layout_nodes.append(node_meta.name) return layout_nodes def get_propagation_direction(self, node: str): """ The logic is propagating all layout nodes away from the accumulator node. 
""" self.visited = [] self.get_influenced_users(node) nodes_influenced_dir_users = self.visited self.visited = [] self.get_influenced_inputs(node) nodes_influenced_dir_inputs = self.visited if "accum" in nodes_influenced_dir_users and "accum" not in nodes_influenced_dir_inputs: return "inputs" elif "accum" not in nodes_influenced_dir_users and "accum" in nodes_influenced_dir_inputs: return "users" else: raise RuntimeError("Unsolved propagation direction") # Get all influenced nodes if we propagate along the user direction def get_influenced_users(self, node: str): if node in self.visited: return self.visited.append(node) users = self.dag_ir.get_users(node) for user in users: self.get_influenced_users(user) user_inputs = [] for user in users: user_inputs.append(set(self.dag_ir.get_all_inputs(user))) if len(user_inputs) > 0: user_inputs = set.union(*user_inputs) user_inputs.remove(node) for input in user_inputs: self.get_influenced_inputs(input) # Get all influenced nodes if we propagate along the input direction def get_influenced_inputs(self, node: str): if node in self.visited: return self.visited.append(node) inputs = self.dag_ir.get_all_inputs(node) for input in inputs: self.get_influenced_inputs(input) input_users = [] for input in inputs: input_users.append(set(self.dag_ir.get_users(input))) if len(input_users) > 0: input_users = set.union(*input_users) input_users.remove(node) for user in input_users: self.get_influenced_users(user) def add_copy_before(self, layout_node_meta: LayoutNode, target: str): copied_node_meta = deepcopy(layout_node_meta) copied_node = f"{copied_node_meta.name}_copy{self.copy_cnt}" self.copy_cnt += 1 copied_node_meta.name = copied_node self.dag_ir.add_node(copied_node_meta) # Add edges target_inputs = self.dag_ir.get_all_inputs(target) for src in target_inputs: self.dag_ir.remove_edge(src, target) self.dag_ir.add_edge(src, copied_node) self.dag_ir.add_edge(copied_node, target) self.layout_nodes_worklist.append(copied_node) def add_copy_after(self, layout_node_meta: LayoutNode, target: str): copied_node_meta = deepcopy(layout_node_meta) copied_node = f"{copied_node_meta.name}_copy{self.copy_cnt}" self.copy_cnt += 1 copied_node_meta.name = copied_node self.dag_ir.add_node(copied_node_meta) # Add edges users = self.dag_ir.get_users(target) for user in users: self.dag_ir.remove_edge(target, user) self.dag_ir.add_edge(copied_node, user) self.dag_ir.add_edge(target, copied_node) self.layout_nodes_worklist.append(copied_node) # Propagate the layout `node` along the user direction def propagate_to_users(self, layout_node_meta: LayoutNode, node: str): """ Propagate layout node to users """ if node in self.visited: # Avoid applying twice return self.visited.append(node) node_meta = self.dag_ir.get_node_meta(node) if layout_node_meta.name != node: if isinstance(node_meta, LayoutNode): # Layout node is not transparent with layout node self.add_copy_before(layout_node_meta, node) return else: layout_node_meta.apply_to_user(node_meta) users = self.dag_ir.get_users(node) user_inputs = [] for user in users: user_inputs.append(set(self.dag_ir.get_all_inputs(user))) for user in users: self.propagate_to_users(layout_node_meta, user) if len(user_inputs) > 0: user_inputs = set.union(*user_inputs) user_inputs.remove(node) for input in user_inputs: self.propagate_to_inputs(layout_node_meta.get_inverse_node(), input) # Propagate the layout `node` along the input direction def propagate_to_inputs(self, layout_node_meta: LayoutNode, node: str): """ Propagate layout node to inputs """ 
        if node in self.visited:
            # Avoid applying twice
            return
        self.visited.append(node)
        node_meta = self.dag_ir.get_node_meta(node)
        if layout_node_meta.name != node:
            if isinstance(node_meta, LayoutNode):
                # Layout node is not transparent with layout node
                self.add_copy_after(layout_node_meta, node)
                return
            else:
                layout_node_meta.apply_to_input(node_meta)
        inputs = self.dag_ir.get_all_inputs(node)
        input_users = []
        for input in inputs:
            input_users.append(set(self.dag_ir.get_users(input)))
        for input in inputs:
            self.propagate_to_inputs(layout_node_meta, input)
        if len(input_users) > 0:
            input_users = set.union(*input_users)
            input_users.remove(node)
            for user in input_users:
                self.propagate_to_users(layout_node_meta.get_inverse_node(), user)
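A toy rendering of the direction choice made by `get_propagation_direction` (illustration only; a plain dict stands in for the DAG IR, and the reachability walk is far simpler than `get_influenced_users`/`get_influenced_inputs`): if the accumulator is found upstream of the layout node, the layout is pushed toward its users, otherwise toward its inputs — always away from `accum`.

```python
# Toy illustration of the propagation-direction choice (not the CUTLASS DAG IR).
def reachable(edges, start):
    """All nodes reachable from `start` by following `edges`."""
    seen, stack = set(), [start]
    while stack:
        node = stack.pop()
        if node in seen:
            continue
        seen.add(node)
        stack.extend(edges.get(node, []))
    return seen

# accum -> permute -> add -> D, with a bias tensor also feeding the add
users = {"accum": ["permute"], "permute": ["add"], "bias": ["add"], "add": ["D"]}
inputs = {}
for src, dsts in users.items():          # build the reverse (input-direction) edges
    for dst in dsts:
        inputs.setdefault(dst, []).append(src)

layout_node = "permute"
if "accum" in reachable(inputs, layout_node):
    direction = "users"    # accum sits upstream, so push the permute toward its users
else:
    direction = "inputs"   # accum sits downstream, so push the permute toward its inputs
print(direction)           # -> users
```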
cutlass/python/cutlass/backend/evt/passes/pass_layout_elimination.py/0
{ "file_path": "cutlass/python/cutlass/backend/evt/passes/pass_layout_elimination.py", "repo_id": "cutlass", "token_count": 3853 }
52
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utility functions for converting between frontend datatypes and CUTLASS datatypes """ import cutlass from cutlass_library import ( DataTypeSize, MathOperation, MathInstruction ) from cutlass.backend.library import ( TileDescription, ) bfloat16_available = None cupy_available = None numpy_available = None torch_available = None _library_to_cupy_dict = None _library_to_numpy_dict = None _library_to_torch_dict = None _torch_to_library_dict = None def is_numpy_available(): global numpy_available, _library_to_numpy_dict if numpy_available is None: try: import numpy as np numpy_available = True _library_to_numpy_dict = { cutlass.DataType.f16: np.float16, cutlass.DataType.f32: np.float32, cutlass.DataType.f64: np.float64, cutlass.DataType.s8: np.int8, cutlass.DataType.s32: np.int32, } except ImportError: numpy_available = False _library_to_numpy_dict = {} return numpy_available def is_numpy_tensor(inp) -> bool: if is_numpy_available(): import numpy as np return isinstance(inp, np.ndarray) return False def numpy_library_type(inp) -> cutlass.DataType: if is_numpy_available(): import numpy as np if inp == np.float16: return cutlass.DataType.f16 elif inp == np.float32: return cutlass.DataType.f32 elif inp == np.float64: return cutlass.DataType.f64 elif inp == np.int8: return cutlass.DataType.s8 elif inp == np.int32: return cutlass.DataType.s32 return None def numpy_type(inp): return _library_to_numpy_dict.get(inp, None) def is_cupy_available(): global cupy_available if cupy_available is None: try: import cupy as cp cupy_available = True _library_to_cupy_dict = { cutlass.DataType.f16: cp.float16, cutlass.DataType.f32: cp.float32, cutlass.DataType.f64: cp.float64, cutlass.DataType.s8: cp.int8, cutlass.DataType.s32: cp.int32, } except 
ImportError: cupy_available = False _library_to_cupy_dict = {} return cupy_available def is_cupy_tensor(inp) -> bool: if is_cupy_available(): import cupy as cp return isinstance(inp, cp.ndarray) return False def cupy_library_type(inp) -> cutlass.DataType: if is_cupy_available(): import cupy as cp if inp == cp.float16: return cutlass.DataType.f16 elif inp == cp.float32: return cutlass.DataType.f32 elif inp == cp.float64: return cutlass.DataType.f64 return None def cupy_type(inp): return _library_to_cupy_dict.get(inp, None) def is_torch_available(): global torch_available, _library_to_torch_dict, _torch_to_library_dict if torch_available is None: try: import torch torch_available = True _torch_to_library_dict = { torch.half: cutlass.DataType.f16, torch.float16: cutlass.DataType.f16, torch.bfloat16: cutlass.DataType.bf16, torch.float: cutlass.DataType.f32, torch.float32: cutlass.DataType.f32, torch.double: cutlass.DataType.f64, torch.float64: cutlass.DataType.f64, torch.int8: cutlass.DataType.s8, torch.int32: cutlass.DataType.s32, torch.uint8: cutlass.DataType.u8, } _library_to_torch_dict = { cutlass.DataType.f16: torch.half, cutlass.DataType.f16: torch.float16, cutlass.DataType.bf16: torch.bfloat16, cutlass.DataType.f32: torch.float, cutlass.DataType.f32: torch.float32, cutlass.DataType.f64: torch.double, cutlass.DataType.f64: torch.float64, cutlass.DataType.s8: torch.int8, cutlass.DataType.s32: torch.int32, cutlass.DataType.u8: torch.uint8, } def possibly_add_type(torch_type_name, cutlass_type): # Only try adding the type if the version of torch being used supports it if hasattr(torch, torch_type_name): torch_type = getattr(torch, torch_type_name) _torch_to_library_dict[torch_type] = cutlass_type _library_to_torch_dict[cutlass_type] = torch_type possibly_add_type("float8_e4m3fn", cutlass.DataType.e4m3) possibly_add_type("float8_e5m2", cutlass.DataType.e5m2) except ImportError: torch_available = False _torch_to_library_dict = {} _library_to_torch_dict = {} return torch_available def is_torch_tensor(inp) -> bool: if is_torch_available(): import torch return isinstance(inp, torch.Tensor) return False def torch_library_type(inp) -> cutlass.DataType: return _torch_to_library_dict.get(inp, None) def torch_type(inp): return _library_to_torch_dict.get(inp, None) def is_bfloat16_available(): global bfloat16_available if bfloat16_available is None: try: import bfloat16 bfloat16_available = True except ImportError: bfloat16_available = False return bfloat16_available def bfloat16_library_type(inp) -> cutlass.DataType: if is_bfloat16_available(): import bfloat16 if inp == bfloat16.bfloat16: return cutlass.DataType.bf16 def bfloat16_type(inp): if is_bfloat16_available(): import bfloat16 if inp == cutlass.DataType.bf16: return bfloat16.bfloat16 def library_type(inp): if inp in DataTypeSize: return inp for cvt_fn in [ bfloat16_library_type, cupy_library_type, numpy_library_type, torch_library_type, ]: out = cvt_fn(inp) if out is not None: return out raise Exception(f"No available conversion from type {inp} to a library type.") def _tensor_from_numpy(np_tensor): dtype = library_type(np_tensor.dtype) if np_tensor.flags.c_contiguous: layout = cutlass.LayoutType.RowMajor elif np_tensor.flags.f_contiguous: layout = cutlass.LayoutType.ColumnMajor return (dtype, layout) def _tensor_from_torch(pt_tensor): dtype = library_type(pt_tensor.dtype) return (dtype, cutlass.LayoutType.RowMajor) def get_datatype_and_layout(tensor): if (is_numpy_tensor(tensor) or is_cupy_tensor(tensor)): return _tensor_from_numpy(tensor) 
elif is_torch_tensor(tensor): return _tensor_from_torch(tensor) elif isinstance(tensor, float) or isinstance(tensor, int): return (cutlass.DataType.f32, cutlass.LayoutType.RowMajor) else: raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.") def get_tensor_shape(tensor, op="GEMM"): if (is_numpy_tensor(tensor) or is_cupy_tensor(tensor)): return tensor.shape elif is_torch_tensor(tensor): size = tensor.size() if op == "CONV": # PyTorch Tensors have shape NCHW return (size[0], size[2], size[3], size[1]) else: return tuple(tensor.size()) elif isinstance(tensor, float) or isinstance(tensor, int): return (1,) else: raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.") _math_operation_value_map = {x.value: x for x in MathOperation} def backend_math_operation(math_op: MathOperation): if math_op.value not in _math_operation_value_map.keys(): raise Exception(f"Unable to convert math operation of type {math_op} to backend math operation.") return _math_operation_value_map[math_op.value] def construct_backend_td(td: cutlass.TileDescription, kernel_schedule: cutlass.KernelScheduleType, epilogue_schedule: cutlass.EpilogueScheduleType, tile_scheduler: cutlass.TileSchedulerType) -> TileDescription: mi = td.math_instruction backend_mi = MathInstruction( mi.instruction_shape, mi.element_a, mi.element_b, mi.element_accumulator, mi.opcode_class, backend_math_operation(mi.math_operation) ) cluster_shape = td.cluster_shape if hasattr(td, "cluster_shape") else [1, 1, 1] return TileDescription(td.threadblock_shape, td.stages, td.warp_count, backend_mi, cluster_shape, kernel_schedule, epilogue_schedule, tile_scheduler) def td_from_profiler_op(op) -> TileDescription: """ Converts the profiler's TileDescription in ``op`` into the backend TileDescription :param op: profiler Operation :returns: backend TileDescription :rtype: cutlass.backend.TileDescription """ kschedule = op.kernel_schedule if hasattr(op, 'kernel_schedule') else None eschedule = op.epilogue_schedule if hasattr(op, 'epilogue_schedule') else None tschedule = op.tile_scheduler if hasattr(op, 'tile_scheduler') else None return construct_backend_td(op.tile_description, kschedule, eschedule, tschedule) def td_from_profiler_td(td: TileDescription) -> TileDescription: """ Converts the profiler's TileDescription into the backend TileDescription :param td: profiler TileDescription :type td: cutlass.TileDescription :returns: backend TileDescription :rtype: cutlass.backend.TileDescription """ return construct_backend_td(td, kernel_schedule=None, epilogue_schedule=None, tile_scheduler=None) def to_camel_case(snake_str): return "".join(x.capitalize() for x in snake_str.lower().split("_")) def getattr_enum(obj, attr_name): # The attr_name is under the snake_case camel_attr = to_camel_case(attr_name) if hasattr(obj, camel_attr): return getattr(obj, camel_attr) else: raise Exception(f"Invalid option: {attr_name}")
cutlass/python/cutlass/utils/datatypes.py/0
{ "file_path": "cutlass/python/cutlass/utils/datatypes.py", "repo_id": "cutlass", "token_count": 5250 }
53
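The record above defines the frontend-to-CUTLASS datatype conversion helpers (numpy/cupy/torch dtypes to cutlass.DataType, plus layout and shape inference). Below is a minimal usage sketch, not part of the original file: it assumes numpy and torch are installed and that the helpers are importable as cutlass.utils.datatypes (the import path is an assumption inferred from the file path above).

import numpy as np
import torch

# Assumed import path, inferred from the file path of the record above.
from cutlass.utils.datatypes import library_type, get_datatype_and_layout, get_tensor_shape

a = np.zeros((4, 8), dtype=np.float32)           # C-contiguous -> RowMajor
b = torch.zeros(2, 3, 16, 16, dtype=torch.half)  # NCHW layout as produced by PyTorch

print(library_type(np.float16))        # cutlass.DataType.f16
print(get_datatype_and_layout(a))      # (cutlass.DataType.f32, cutlass.LayoutType.RowMajor)
print(get_tensor_shape(b, op="CONV"))  # (2, 16, 16, 3): NCHW sizes reported in NHWC order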
/* Some sane resets. */ html { height: 100%; } body { margin: 0; min-height: 100%; } /* All the flexbox magic! */ body, .sb-announcement, .sb-content, .sb-main, .sb-container, .sb-container__inner, .sb-article-container, .sb-footer-content, .sb-header, .sb-header-secondary, .sb-footer { display: flex; } /* These order things vertically */ body, .sb-main, .sb-article-container { flex-direction: column; } /* Put elements in the center */ .sb-header, .sb-header-secondary, .sb-container, .sb-content, .sb-footer, .sb-footer-content { justify-content: center; } /* Put elements at the ends */ .sb-article-container { justify-content: space-between; } /* These elements grow. */ .sb-main, .sb-content, .sb-container, article { flex-grow: 1; } /* Because padding making this wider is not fun */ article { box-sizing: border-box; } /* The announcements element should never be wider than the page. */ .sb-announcement { max-width: 100%; } .sb-sidebar-primary, .sb-sidebar-secondary { flex-shrink: 0; width: 17rem; } .sb-announcement__inner { justify-content: center; box-sizing: border-box; height: 3rem; overflow-x: auto; white-space: nowrap; } /* Sidebars, with checkbox-based toggle */ .sb-sidebar-primary, .sb-sidebar-secondary { position: fixed; height: 100%; top: 0; } .sb-sidebar-primary { left: -17rem; transition: left 250ms ease-in-out; } .sb-sidebar-secondary { right: -17rem; transition: right 250ms ease-in-out; } .sb-sidebar-toggle { display: none; } .sb-sidebar-overlay { position: fixed; top: 0; width: 0; height: 0; transition: width 0ms ease 250ms, height 0ms ease 250ms, opacity 250ms ease; opacity: 0; background-color: rgba(0, 0, 0, 0.54); } #sb-sidebar-toggle--primary:checked ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--primary"], #sb-sidebar-toggle--secondary:checked ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--secondary"] { width: 100%; height: 100%; opacity: 1; transition: width 0ms ease, height 0ms ease, opacity 250ms ease; } #sb-sidebar-toggle--primary:checked ~ .sb-container .sb-sidebar-primary { left: 0; } #sb-sidebar-toggle--secondary:checked ~ .sb-container .sb-sidebar-secondary { right: 0; } /* Full-width mode */ .drop-secondary-sidebar-for-full-width-content .hide-when-secondary-sidebar-shown { display: none !important; } .drop-secondary-sidebar-for-full-width-content .sb-sidebar-secondary { display: none !important; } /* Mobile views */ .sb-page-width { width: 100%; } .sb-article-container, .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 100vw; } .sb-article, .match-content-width { padding: 0 1rem; box-sizing: border-box; } @media (min-width: 32rem) { .sb-article, .match-content-width { padding: 0 2rem; } } /* Tablet views */ @media (min-width: 42rem) { .sb-article-container { width: auto; } .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 42rem; } .sb-article, .match-content-width { width: 42rem; } } @media (min-width: 46rem) { .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 46rem; } .sb-article, .match-content-width { width: 46rem; } } @media (min-width: 50rem) { .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 50rem; } 
.sb-article, .match-content-width { width: 50rem; } } /* Tablet views */ @media (min-width: 59rem) { .sb-sidebar-secondary { position: static; } .hide-when-secondary-sidebar-shown { display: none !important; } .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 59rem; } .sb-article, .match-content-width { width: 42rem; } } @media (min-width: 63rem) { .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 63rem; } .sb-article, .match-content-width { width: 46rem; } } @media (min-width: 67rem) { .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 67rem; } .sb-article, .match-content-width { width: 50rem; } } /* Desktop views */ @media (min-width: 76rem) { .sb-sidebar-primary { position: static; } .hide-when-primary-sidebar-shown { display: none !important; } .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 59rem; } .sb-article, .match-content-width { width: 42rem; } } /* Full desktop views */ @media (min-width: 80rem) { .sb-article, .match-content-width { width: 46rem; } .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 63rem; } } @media (min-width: 84rem) { .sb-article, .match-content-width { width: 50rem; } .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 67rem; } } @media (min-width: 88rem) { .sb-footer-content__inner, .drop-secondary-sidebar-for-full-width-content .sb-article, .drop-secondary-sidebar-for-full-width-content .match-content-width { width: 67rem; } .sb-page-width { width: 88rem; } }
cutlass/python/docs/_static/skeleton.css/0
{ "file_path": "cutlass/python/docs/_static/skeleton.css", "repo_id": "cutlass", "token_count": 2359 }
54
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Functions for manipulating IntTuples """ from functools import reduce from itertools import chain from typing import Union from .typing import Integer def is_int(x): return isinstance(x, Integer) def is_tuple(x): return isinstance(x, tuple) def flatten(t): if is_tuple(t): if len(t) == 0: return () else: return tuple(i for a in t for i in flatten(a)) else: return (t,) def signum(a): return bool(a > 0) - bool(a < 0) def product(a): if is_tuple(a): return reduce(lambda val,elem : val*product(elem), a, 1) else: return a def inner_product(a, b): if is_tuple(a): # tuple tuple assert len(a) == len(b) return sum(inner_product(x,y) for x,y in zip(a,b)) else: # "int" "int" assert not is_tuple(b) return a * b def tuple_max(a): if is_tuple(a): return max(tuple_max(x) for x in a) else: return a def elem_scale(a, b): if is_tuple(a): if is_tuple(b): # tuple tuple assert len(a) == len(b) return tuple(elem_scale(x,y) for x,y in zip(a,b)) else: # tuple "int" assert False # Error else: if is_tuple(b): # "int" tuple return elem_scale(a, product(b)) else: # "int" "int" return a * b # Inclusive prefix ceil div with output congruent to input a def shape_div(a, b): if is_tuple(a): if is_tuple(b): # tuple tuple assert len(a) == len(b) return tuple(shape_div(x,y) for x,y in zip(a,b)) else: # tuple "int" #r = [shape_div(a[0],b)] + [shape_div(a[i],b := shape_div(b, product(a[i-1]))) for i in range(1,len(a))] r = [] for v in a: r.append(shape_div(v,b)) b = shape_div(b,product(v)) return tuple(r) else: if is_tuple(b): # "int" tuple return shape_div(a, product(b)) else: # "int" "int" assert a % b == 0 or b % a == 0 #return -(-a // b) # Python exclusive impl: "//" is always floor div if a % b == 0: return a // b else: return signum(a*b) # Exclusive prefix 
product with output congruent to input a def prefix_product(a, init=1): if is_tuple(a): if is_tuple(init): # tuple tuple assert len(a) == len(init) return tuple(prefix_product(x,i) for x,i in zip(a,init)) else: # tuple "int" #r = [prefix_product(a[0],init)] + [prefix_product(a[i],init := init * product(a[i-1])) for i in range(1,len(a))] r = [] for v in a: r.append(prefix_product(v,init)) init = init * product(v) return tuple(r) else: if is_tuple(init): # "int" tuple assert False # Error else: # "int" "int" return init def idx2crd(idx, shape, stride=None): if stride is None: stride = prefix_product(shape) if is_tuple(idx): if is_tuple(shape): # tuple tuple tuple assert len(idx) == len(shape) and len(idx) == len(stride) return tuple(idx2crd(i, s, d) for i, s, d in zip(idx,shape,stride)) else: # tuple "int" "int" assert False # Error else: if is_tuple(shape): # "int" tuple tuple assert len(shape) == len(stride) return tuple(idx2crd(idx, s, d) for s,d in zip(shape,stride)) else: # "int" "int" "int" return (idx // stride) % shape def crd2idx(crd, shape, stride=None): if stride is None: stride = prefix_product(shape) if is_tuple(crd): if is_tuple(shape): # tuple tuple tuple assert len(crd) == len(shape) and len(crd) == len(stride) return sum(crd2idx(c, s, d) for c, s, d in zip(crd, shape, stride)) else: # tuple "int" "int" assert False, f"crd={crd}, shape={shape}" # Error else: if crd is None: crd = 0 if is_tuple(shape): # "int" tuple tuple assert len(shape) == len(stride) result = 0 for i in range(len(shape)-1): result += crd2idx(crd % product(shape[i]), shape[i], stride[i]) crd = crd // product(shape[i]) return result + crd2idx(crd, shape[-1], stride[-1]) else: # "int" "int" "int" return crd * stride # Transform crd into the dst_shape's iteration space def crd2crd(crd, dst_shape, src_shape=None): if is_tuple(crd): if is_tuple(dst_shape): # tuple tuple assert len(crd) == len(dst_shape) return tuple(crd2crd(x, y) for x, y in zip(crd,dst_shape)) else: # tuple "int" # Ambiguous unless we have src_shape assert src_shape is not None return crd2idx(crd, src_shape) else: if is_tuple(dst_shape): # "int" tuple return idx2crd(crd, dst_shape) else: # "int" "int" assert crd < dst_shape return crd # Filter trg according to crd: keep only elements of trg that are paired with None def slice_(crd: Union[None, tuple, int], trg: Union[tuple, int]): if is_tuple(crd): if is_tuple(trg): # tuple tuple assert len(crd) == len(trg) # match C++ behavior of `filter_tuple` using `tuple_cat(...)` return tuple(chain(*filter(lambda x: x != (), [slice_(c, s) for c, s in zip(crd, trg)]))) else: assert False # tuple "int" : Error elif crd is None: # match C++ behavior `return cute::tuple<B>{b};` return (trg,) else: return () # Determine if None appears at any of an int_tuples' terminals def has_none(a: Union[None, tuple, int]): if is_tuple(a): return any(has_none(v) for v in a) else: return a is None
cutlass/python/pycute/int_tuple.py/0
{ "file_path": "cutlass/python/pycute/int_tuple.py", "repo_id": "cutlass", "token_count": 3526 }
55
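The IntTuple helpers above mirror CuTe's tuple algebra in pure Python. A minimal sketch of how they compose, assuming the functions are importable from the pycute package as laid out above:

# Assumed import path for the helpers defined in int_tuple.py above.
from pycute.int_tuple import product, inner_product, prefix_product, crd2idx, idx2crd

shape = ((1, 2), 8, 2)                  # hierarchical IntTuple, product = 32

print(product(shape))                   # 32
print(inner_product((8, 4), (8, 4)))    # 8*8 + 4*4 = 80
print(prefix_product(shape))            # ((1, 1), 2, 16): exclusive prefix products (column-major strides)
print(crd2idx(((0, 1), 3, 1), shape))   # 0*1 + 1*1 + 3*2 + 1*16 = 23
print(idx2crd(23, shape))               # ((0, 1), 3, 1)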
################################################################################ # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ Unit test for load nodes in SM90 """ import logging import unittest import cutlass from cutlass.backend import * from cutlass.epilogue import * from utils.evt_testbed import EVTTestBed, EVTTestCaseBase cutlass.set_log_level(logging.WARNING) @unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]") class TestEVTLoad(EVTTestCaseBase): def test_tensor_load(self): """ Load extra tensor with shape [m, n] """ def evt_tensor_load(accum, C, aux, aux_batch): D = accum + C + aux + aux_batch return D for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "C": self.fake_tensor(self.element, (l, m, n)), "aux": self.fake_tensor(self.element, (m, n)), "aux_batch": self.fake_tensor(np.float32, (l, m, n)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_tensor_load, example_inputs) input_keys = ["C", "aux", "aux_batch"] result_keys = ["D"] launcher.verify((m, n, k), input_keys, result_keys, l) def test_row_broadcast(self): """ Load extra tensor with shape [1, n] """ def evt_row_broadcast(accum, C, bias, bias_batch): D = accum + C + bias + bias_batch return D for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "C": self.fake_tensor(self.element, (l, m, n)), "bias": self.fake_tensor(self.element, (n,)), "bias_batch": self.fake_tensor(np.float32, (l, 1, n)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_row_broadcast, example_inputs) input_keys = ["C", "bias", "bias_batch"] result_keys = ["D"] launcher.verify((m, n, k), input_keys, result_keys, l) def test_column_broadcast(self): """ Load extra tensor with shape [m, 
1] """ def evt_column_broadcast(accum, C, bias, bias_batch): D = accum + C + bias + bias_batch return D for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "C": self.fake_tensor(self.element, (l, m, n)), "bias": self.fake_tensor(self.element, (m, 1)), "bias_batch": self.fake_tensor(np.float32, (l, m, 1)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_column_broadcast, example_inputs) input_keys = ["C", "bias", "bias_batch"] result_keys = ["D"] launcher.verify((m, n, k), input_keys, result_keys, l) def test_scalar_broadcast(self): """ Load extra tensor with shape [1, 1] """ def evt_scalar_broadcast(accum, C, alpha, alpha_batch): D = accum + C + alpha + alpha_batch return D for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "C": self.fake_tensor(self.element, (l, m, n)), "alpha": 0.5, "alpha_batch": self.fake_tensor(np.float32, (l, 1, 1)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_scalar_broadcast, example_inputs) input_keys = ["C", "alpha", "alpha_batch"] result_keys = ["D"] launcher.verify((m, n, k), input_keys, result_keys, l) if __name__ == '__main__': unittest.main()
cutlass/test/python/cutlass/evt/evt_load_sm80_90.py/0
{ "file_path": "cutlass/test/python/cutlass/evt/evt_load_sm80_90.py", "repo_id": "cutlass", "token_count": 2528 }
56
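The EVT load tests above exercise broadcast shapes end to end; the arithmetic itself is ordinary elementwise addition with numpy-style broadcasting. A hedged numpy reference for the row-broadcast case follows (it models the expected output only, not the CUTLASS kernel; the tensor sizes are illustrative):

import numpy as np

l, m, n = 2, 128, 128                                      # illustrative batch count and problem size
accum      = np.random.rand(l, m, n).astype(np.float32)
C          = np.random.rand(l, m, n).astype(np.float32)
bias       = np.random.rand(n).astype(np.float32)          # row broadcast, shape (n,)
bias_batch = np.random.rand(l, 1, n).astype(np.float32)    # per-batch row broadcast, shape (l, 1, n)

# evt_row_broadcast above computes D = accum + C + bias + bias_batch with these broadcasts.
D = accum + C + bias + bias_batch
assert D.shape == (l, m, n)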
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <cutlass/trace.h> #include <cute/tensor.hpp> TEST(CuTe_core, Tuple) { using namespace cute; CUTLASS_TRACE_HOST("-------------------------------"); CUTLASS_TRACE_HOST("SIMPLE STATIC AND DYNAMIC TUPLES"); CUTLASS_TRACE_HOST("-------------------------------"); using tuple_2d_s_type = tuple<_8, _4>; // (8,4) using tuple_3d_s_type = tuple<_8, _4, _2>; // (8,4,2) using tuple_3h_s_type = tuple<tuple<_1, _2>, _8, _2>; // ((1,2),8,2) using tuple_2d_d_type = tuple<int, int>; // (8,4) using tuple_3d_d_type = tuple<int, int, int>; // (8,4,2) using tuple_3h_d_type = tuple<tuple<int, int>, int, int>; // ((1,2),8,2) using tuple_2d_m_type = tuple<_8, int>; // (8,4) using tuple_3d_m_type = tuple<int, int, _2>; // (8,4,2) using tuple_3h_m_type = tuple<tuple<int, _2>, int, int>; // ((1,2),8,2) tuple_2d_s_type tuple_2d_s; tuple_3d_s_type tuple_3d_s; tuple_3h_s_type tuple_3h_s; tuple_2d_d_type tuple_2d_d(8,4); tuple_3d_d_type tuple_3d_d(8,4,2); tuple_3h_d_type tuple_3h_d(tuple<int,int>(1,2),8,2); tuple_2d_m_type tuple_2d_m(_8{}, 4); tuple_3d_m_type tuple_3d_m(8,4,_2{}); tuple_3h_m_type tuple_3h_m(tuple<int,_2>(1,_2{}),8,2); CUTLASS_TRACE_HOST(tuple_2d_s << (is_static<tuple_2d_s_type>::value ? " Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_2d_s_type)); ASSERT_TRUE(is_static<tuple_2d_s_type>::value == true); ASSERT_TRUE(sizeof(tuple_2d_s_type) == 1); ASSERT_TRUE(std::is_empty<tuple_2d_s_type>::value); CUTLASS_TRACE_HOST(tuple_3d_s << (is_static<tuple_3d_s_type>::value ? 
" Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_3d_s_type)); ASSERT_TRUE(is_static<tuple_3d_s_type>::value == true); ASSERT_TRUE(sizeof(tuple_3d_s_type) == 1); ASSERT_TRUE(std::is_empty<tuple_3d_s_type>::value); CUTLASS_TRACE_HOST(tuple_3h_s << (is_static<tuple_3h_s_type>::value ? " Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_3h_s_type)); ASSERT_TRUE(is_static<tuple_3h_s_type>::value == true); ASSERT_TRUE(sizeof(tuple_3h_s_type) == 1); ASSERT_TRUE(std::is_empty<tuple_3h_s_type>::value); CUTLASS_TRACE_HOST(tuple_2d_d << (is_static<tuple_2d_d_type>::value ? " Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_2d_d_type)); ASSERT_TRUE(is_static<tuple_2d_d_type>::value == false); ASSERT_TRUE(sizeof(tuple_2d_d_type) == 8); ASSERT_TRUE(!std::is_empty<tuple_2d_d_type>::value); CUTLASS_TRACE_HOST(tuple_3d_d << (is_static<tuple_3d_d_type>::value ? " Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_3d_d_type)); ASSERT_TRUE(is_static<tuple_3d_d_type>::value == false); ASSERT_TRUE(sizeof(tuple_3d_d_type) == 12); ASSERT_TRUE(!std::is_empty<tuple_3d_d_type>::value); CUTLASS_TRACE_HOST(tuple_3h_d << (is_static<tuple_3h_d_type>::value ? " Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_3h_d_type)); ASSERT_TRUE(is_static<tuple_3h_d_type>::value == false); ASSERT_TRUE(sizeof(tuple_3h_d_type) == 16); ASSERT_TRUE(!std::is_empty<tuple_3h_d_type>::value); CUTLASS_TRACE_HOST(tuple_2d_m << (is_static<tuple_2d_m_type>::value ? " Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_2d_m_type)); ASSERT_TRUE(is_static<tuple_2d_m_type>::value == false); ASSERT_TRUE(sizeof(tuple_2d_m_type) == 4); ASSERT_TRUE(!std::is_empty<tuple_2d_m_type>::value); CUTLASS_TRACE_HOST(tuple_3d_m << (is_static<tuple_3d_m_type>::value ? " Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_3d_m_type)); ASSERT_TRUE(is_static<tuple_3d_m_type>::value == false); ASSERT_TRUE(sizeof(tuple_3d_m_type) == 8); ASSERT_TRUE(!std::is_empty<tuple_3d_m_type>::value); CUTLASS_TRACE_HOST(tuple_3h_m << (is_static<tuple_3h_m_type>::value ? 
" Static " : " Dynamic ") << "sizeof = " << sizeof(tuple_3h_m_type)); ASSERT_TRUE(is_static<tuple_3h_m_type>::value == false); ASSERT_TRUE(sizeof(tuple_3h_m_type) == 12); ASSERT_TRUE(!std::is_empty<tuple_3h_m_type>::value); CUTLASS_TRACE_HOST("-------------------------------"); CUTLASS_TRACE_HOST("SIMPLE TUPLE OPS"); CUTLASS_TRACE_HOST("-------------------------------"); CUTLASS_TRACE_HOST("product(" << tuple_2d_s << ") => " << product(tuple_2d_s)); CUTE_STATIC_ASSERT_V(product(tuple_2d_s) == _32{}); CUTLASS_TRACE_HOST("product(" << tuple_3d_s << ") => " << product(tuple_3d_s)); CUTE_STATIC_ASSERT_V(product(tuple_3d_s) == _64{}); CUTLASS_TRACE_HOST("product(" << tuple_3h_s << ") => " << product(tuple_3h_s)); CUTE_STATIC_ASSERT_V(product(tuple_3h_s) == _32{}); CUTLASS_TRACE_HOST("product(" << tuple_2d_d << ") => " << product(tuple_2d_d)); ASSERT_TRUE(product(tuple_2d_d) == 32); CUTLASS_TRACE_HOST("product(" << tuple_3d_d << ") => " << product(tuple_3d_d)); ASSERT_TRUE(product(tuple_3d_d) == 64); CUTLASS_TRACE_HOST("product(" << tuple_3h_d << ") => " << product(tuple_3h_d)); ASSERT_TRUE(product(tuple_3h_d) == 32); CUTLASS_TRACE_HOST("product(" << tuple_2d_m << ") => " << product(tuple_2d_m)); ASSERT_TRUE(product(tuple_2d_m) == 32); CUTLASS_TRACE_HOST("product(" << tuple_3d_m << ") => " << product(tuple_3d_m)); ASSERT_TRUE(product(tuple_3d_m) == 64); CUTLASS_TRACE_HOST("product(" << tuple_3h_m << ") => " << product(tuple_3h_m)); ASSERT_TRUE(product(tuple_3h_m) == 32); CUTLASS_TRACE_HOST("max(" << tuple_2d_s << ") => " << max(tuple_2d_s)); CUTE_STATIC_ASSERT_V(max(tuple_2d_s) == _8{}); CUTLASS_TRACE_HOST("max(" << tuple_3d_s << ") => " << max(tuple_3d_s)); CUTE_STATIC_ASSERT_V(max(tuple_3d_s) == _8{}); CUTLASS_TRACE_HOST("max(" << tuple_3h_s << ") => " << max(tuple_3h_s)); CUTE_STATIC_ASSERT_V(max(tuple_3h_s) == _8{}); CUTLASS_TRACE_HOST("max(" << tuple_2d_d << ") => " << max(tuple_2d_d)); ASSERT_TRUE(max(tuple_2d_d) == 8); CUTLASS_TRACE_HOST("max(" << tuple_3d_d << ") => " << max(tuple_3d_d)); ASSERT_TRUE(max(tuple_3d_d) == 8); CUTLASS_TRACE_HOST("max(" << tuple_3h_d << ") => " << max(tuple_3h_d)); ASSERT_TRUE(max(tuple_3h_d) == 8); CUTLASS_TRACE_HOST("max(" << tuple_2d_m << ") => " << max(tuple_2d_m)); ASSERT_TRUE(max(tuple_2d_m) == 8); CUTLASS_TRACE_HOST("max(" << tuple_3d_m << ") => " << max(tuple_3d_m)); ASSERT_TRUE(max(tuple_3d_m) == 8); CUTLASS_TRACE_HOST("max(" << tuple_3h_m << ") => " << max(tuple_3h_m)); ASSERT_TRUE(max(tuple_3h_m) == 8); // 2d s|d|m CUTLASS_TRACE_HOST("inner_product(" << tuple_2d_s << ", " << tuple_2d_s << ") => " << inner_product(tuple_2d_s, tuple_2d_s)); CUTE_STATIC_ASSERT_V(inner_product(tuple_2d_s, tuple_2d_s) == Int<80>{}); CUTLASS_TRACE_HOST("inner_product(" << tuple_2d_d << ", " << tuple_2d_d << ") => " << inner_product(tuple_2d_d, tuple_2d_d)); ASSERT_TRUE(inner_product(tuple_2d_d, tuple_2d_d) == 80); CUTLASS_TRACE_HOST("inner_product(" << tuple_2d_m << ", " << tuple_2d_m << ") => " << inner_product(tuple_2d_m, tuple_2d_m)); ASSERT_TRUE(inner_product(tuple_2d_m, tuple_2d_m) == 80); // 3d s|d|m CUTLASS_TRACE_HOST("inner_product(" << tuple_3d_s << ", " << tuple_3d_s << ") => " << inner_product(tuple_3d_s, tuple_3d_s)); CUTE_STATIC_ASSERT_V(inner_product(tuple_3d_s, tuple_3d_s) == Int<84>{}); CUTLASS_TRACE_HOST("inner_product(" << tuple_3d_d << ", " << tuple_3d_d << ") => " << inner_product(tuple_3d_d, tuple_3d_d)); ASSERT_TRUE(inner_product(tuple_3d_d, tuple_3d_d) == 84); CUTLASS_TRACE_HOST("inner_product(" << tuple_3d_m << ", " << tuple_3d_m << ") => " << 
inner_product(tuple_3d_m, tuple_3d_m)); ASSERT_TRUE(inner_product(tuple_3d_m, tuple_3d_m) == 84); // 3h s|d|m CUTLASS_TRACE_HOST("inner_product(" << tuple_3h_s << ", " << tuple_3h_s << ") => " << inner_product(tuple_3h_s, tuple_3h_s)); CUTE_STATIC_ASSERT_V(inner_product(tuple_3h_s, tuple_3h_s) == Int<73>{}); CUTLASS_TRACE_HOST("inner_product(" << tuple_3h_d << ", " << tuple_3h_d << ") => " << inner_product(tuple_3h_d, tuple_3h_d)); ASSERT_TRUE(inner_product(tuple_3h_d, tuple_3h_d) == 73); CUTLASS_TRACE_HOST("inner_product(" << tuple_3h_m << ", " << tuple_3h_m << ") => " << inner_product(tuple_3h_m, tuple_3h_m)); ASSERT_TRUE(inner_product(tuple_3h_m, tuple_3h_m) == 73); CUTLASS_TRACE_HOST("col_major(" << tuple_2d_s << ") => " << compact_col_major(tuple_2d_s)); CUTE_STATIC_ASSERT_V((compact_col_major(tuple_2d_s) == make_tuple(_1{},_8{}))); CUTLASS_TRACE_HOST("col_major(" << tuple_3d_s << ") => " << compact_col_major(tuple_3d_s)); CUTE_STATIC_ASSERT_V((compact_col_major(tuple_3d_s) == make_tuple(_1{},_8{},_32{}))); CUTLASS_TRACE_HOST("col_major(" << tuple_3h_s << ") => " << compact_col_major(tuple_3h_s)); CUTE_STATIC_ASSERT_V((compact_col_major(tuple_3h_s) == make_tuple(make_tuple(_0{},_1{}),_2{},_16{}))); CUTLASS_TRACE_HOST("col_major(" << tuple_2d_d << ") => " << compact_col_major(tuple_2d_d)); ASSERT_TRUE((compact_col_major(tuple_2d_d) == make_tuple(_1{},8))); CUTLASS_TRACE_HOST("col_major(" << tuple_3d_d << ") => " << compact_col_major(tuple_3d_d)); ASSERT_TRUE((compact_col_major(tuple_3d_d) == make_tuple(_1{},8,32))); CUTLASS_TRACE_HOST("col_major(" << tuple_3h_d << ") => " << compact_col_major(tuple_3h_d)); ASSERT_TRUE((compact_col_major(tuple_3h_d) == make_tuple(make_tuple(_1{},1),2,16))); CUTLASS_TRACE_HOST("col_major(" << tuple_2d_m << ") => " << compact_col_major(tuple_2d_m)); ASSERT_TRUE((compact_col_major(tuple_2d_m) == make_tuple(_1{},_8{}))); CUTLASS_TRACE_HOST("col_major(" << tuple_3d_m << ") => " << compact_col_major(tuple_3d_m)); ASSERT_TRUE((compact_col_major(tuple_3d_m) == make_tuple(_1{},8,32))); CUTLASS_TRACE_HOST("col_major(" << tuple_3h_m << ") => " << compact_col_major(tuple_3h_m)); ASSERT_TRUE((compact_col_major(tuple_3h_m) == make_tuple(make_tuple(_1{},1),2,16))); CUTLASS_TRACE_HOST("-------------------------------"); CUTLASS_TRACE_HOST("SLICING TUPLES"); CUTLASS_TRACE_HOST("-------------------------------"); { auto a = Coord<_2,_3,_4,Coord<_5,_6>>{}; CUTLASS_TRACE_HOST("a = " << a); CUTLASS_TRACE_HOST("a(1) = " << slice(1, a)); CUTLASS_TRACE_HOST("a(_) = " << slice(_, a)); CUTLASS_TRACE_HOST("a(_,1,_,_) = " << slice(make_coord(_,1,_,_), a)); CUTLASS_TRACE_HOST("a(_,1,_,(_,_)) = " << slice(make_coord(_,1,_,make_coord(_,_)), a)); CUTLASS_TRACE_HOST("a(_,1,_,(_,2)) = " << slice(make_coord(_,1,_,make_coord(_,2)), a)); CUTLASS_TRACE_HOST("a(_,1,_,(1,2)) = " << slice(make_coord(_,1,_,make_coord(1,2)), a)); } CUTLASS_TRACE_HOST("-------------------------------"); CUTLASS_TRACE_HOST("DICING TUPLES"); CUTLASS_TRACE_HOST("-------------------------------"); { auto a = Coord<_2,_3,_4,Coord<_5,_6>>{}; CUTLASS_TRACE_HOST("a = " << a); CUTLASS_TRACE_HOST("a(1) = " << dice(1, a)); CUTLASS_TRACE_HOST("a(_) = " << dice(_, a)); CUTLASS_TRACE_HOST("a(_,1,_,_) = " << dice(make_coord(_,1,_,_), a)); CUTLASS_TRACE_HOST("a(_,1,_,(_,_)) = " << dice(make_coord(_,1,_,make_coord(_,_)), a)); CUTLASS_TRACE_HOST("a(_,1,_,(_,2)) = " << dice(make_coord(_,1,_,make_coord(_,2)), a)); CUTLASS_TRACE_HOST("a(_,1,_,(1,2)) = " << dice(make_coord(_,1,_,make_coord(1,2)), a)); } }
cutlass/test/unit/cute/core/tuple.cpp/0
{ "file_path": "cutlass/test/unit/cute/core/tuple.cpp", "repo_id": "cutlass", "token_count": 6293 }
57
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include <fstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/tensor_fill.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename TileIterator> __global__ void kernel_store_iterator( typename TileIterator::Params params, typename TileIterator::TensorRef ref, cutlass::MatrixCoord extent) { TileIterator iterator(params, ref.data(), extent, threadIdx.x, {0, 0}); typename TileIterator::Fragment fragment; CUTLASS_PRAGMA_NO_UNROLL for (int iter = 0; iter < TileIterator::ThreadMap::Count::kTile; ++iter) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < TileIterator::Fragment::kElements; ++i) { typename TileIterator::Element tidx(iter + 1); fragment[i] = tidx; } iterator.store(fragment); ++iterator; } } ///////////////////////////////////////////////////////////////////////////////////////////////// } } } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename Layout> static bool verify_footprint(cutlass::TensorView<T, Layout> view, cutlass::MatrixCoord extent) { for (int r = 0; r < view.extent().row(); ++r) { for (int c = 0; c < view.extent().column(); ++c) { cutlass::MatrixCoord coord{r, c}; bool within = coord < extent; if (within) { if (view.at(coord) == T(0)) { return false; } } else { if (view.at(coord) != T(0)) { return false; } } } } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, tensor_op_64x64x32_64x64x8) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 32; // // The following tests were used to develop the OutputTileOptimalThreadMap // metaprogram. The definitions in the disabled blocks of code in this and // the following tests are hand-written quantities. They are expected to // match what is defined in the ThreadMap. 
// #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<64, 8, 1, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<64, 64>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 64, // column 8, // row 1, // group 1, // cluster 1 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 1, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row 1, // group 1, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 8, // row 1, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{64, 64}; cutlass::MatrixCoord output_extent{62, 56}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("tensor_op_64x64x32_64x64x8.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, tensor_op_128x64x32_64x64x8) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 64; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<128, 8, 2, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<64, 128>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 64, // column 8, // row 2, // group 1, // cluster 8 // tile >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row 2, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 64, // group 1, // cluster 1 // tile >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 8, // row 1, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using 
PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{128, 64}; cutlass::MatrixCoord output_extent{125, 56}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("tensor_op_128x64x32_64x64x8.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, tensor_op_128x256x32_64x64x8) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 256; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<256, 8, 2, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<256, 128>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 256, // column 8, // row 2, // group 1, // cluster 8 // tile >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row 2, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 64, // group 1, // cluster 1 // tile >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 8, // row 1, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{128, 256}; cutlass::MatrixCoord output_extent{123, 252}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("tensor_op_128x256x32_64x64x8.csv"); output << host_tensor.host_view(); } } 
///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, volta_tensor_op_64x64x32_64x64x4) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 32; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<64, 2, 4, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<64, 8>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 64, // column 2, // row 4, // group 1, // cluster 8 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 4, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 8, // group 1, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 2, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{64, 64}; cutlass::MatrixCoord output_extent{62, 56}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("volta_tensor_op_64x64x32_64x64x4.csv"); output << host_tensor.host_view(); } } /////////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, volta_tensor_op_64x128x32_32x64x4) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 128; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<128, 8>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 128, // column 2, // row 2, // group 2, // cluster 8 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 1, // group 2, // cluster 1 // iterations >; using Delta = 
cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 8, // group 32, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 4, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{64, 128}; cutlass::MatrixCoord output_extent{57, 124}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("volta_tensor_op_64x128x32_32x64x4.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, volta_tensor_op_128x256x32_64x64x4) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 256; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<256, 2, 4, 2, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<256, 16>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 256, // column 2, // row 4, // group 2, // cluster 8 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 2, // group 2, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 16, // group 64, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 2, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{128, 256}; cutlass::MatrixCoord output_extent{128, 256}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); 
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed || true) { std::ofstream output("volta_tensor_op_128x256x32_64x64x4.csv"); output << host_tensor.host_view(); } } TEST(PredicatedTileIterator, volta_tensor_op_256x128x32_64x64x4) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 256; using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 4, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{ 256, 128 }; cutlass::MatrixCoord output_extent{ 256, 128 }; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1, 1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator> <<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed || true) { std::ofstream output("volta_tensor_op_256x128x32_64x64x4.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, simt_32x64x8_32x64x1) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 32 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 32; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<64, 1, 4, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<64, 4>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 64, // column 1, // row 4, // group 1, // cluster 1 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 2, // column 1, // row 4, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 32, // column 1, // row 4, // group 16, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 2, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{32, 64}; cutlass::MatrixCoord output_extent{27, 63}; // // 
Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("simt_32x64x8_32x64x1.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, simt_128x128x8_32x64x1) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 32 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 256; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<128, 1, 4, 4, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<128, 16>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 128, // column 1, // row 4, // group 4, // cluster 1 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 2, // column 1, // row 2, // group 4, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 32, // column 1, // row 8, // group 32, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 2, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{128, 128}; cutlass::MatrixCoord output_extent{123, 121}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("simt_128x128x8_32x64x1.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/epilogue/threadblock/predicated_tile_iterator.cu/0
{ "file_path": "cutlass/test/unit/epilogue/threadblock/predicated_tile_iterator.cu", "repo_id": "cutlass", "token_count": 10243 }
58
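Each test above follows the same pattern: a device kernel stores the iterator's footprint into an over-allocated tensor, and the host then checks that writes landed only inside the guarded output extent. The sketch below is a host-only illustration of that property, not the project's verify_footprint helper; the function name, the flat row-major storage, and the assumption that the tensor was pre-filled with a known initial value are all hypothetical.

#include <vector>

// Hypothetical host-side footprint check: every element inside the output
// extent must have been overwritten by the iterator, and every element
// outside it must still hold the initial fill value.
bool footprint_ok(std::vector<int> const &tensor,   // row-major backing storage
                  int rows, int cols,               // allocated tensor extent
                  int out_rows, int out_cols,       // guarded output extent
                  int initial_value) {
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) {
      int value = tensor[r * cols + c];
      bool inside = (r < out_rows) && (c < out_cols);
      if (inside && value == initial_value) {
        return false;   // an in-bounds element was never written
      }
      if (!inside && value != initial_value) {
        return false;   // the iterator wrote outside the guarded extent
      }
    }
  }
  return true;
}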
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for Sm90 f16_f16_f16 cooperative DAG epilogue EVTDAG: D = beta * C + Graph(relu(alpha * acc + aux) + aux) DAGEVT: EVT = alpha * acc + C, D = Graph(maximum(EVT + per-row bias, EVT)) */ #include <iostream> #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cute/atom/mma_atom.hpp" #include "cutlass/numeric_types.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" #include "cutlass/epilogue/collective/default_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" #include "../../common/cutlass_unit_test.h" #include "gemm_testbed_3x_evt.hpp" #include "sm90_evt_operations.hpp" #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) using namespace cute; TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_EVTDAG) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor< TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>; using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor< EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t>; using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombEVTDAG< EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, EpilogueSchedule, FusionCallbacks >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; // Host reference bool passed = test::gemm::device::TestAllEVT<Gemm, test::gemm::device::HostEVTDAG<Gemm>>(); EXPECT_TRUE(passed); } TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 128x128x64_2x2x1_DAGEVT) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; using EpilogueDescriptor = 
cutlass::epilogue::collective::detail::EpilogueDescriptor< TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>; using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor< EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t>; using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombDAGEVT< EpilogueDescriptor, AuxStoreDescriptor, cutlass::half_t, float, cutlass::half_t, float>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, EpilogueTileType, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, EpilogueSchedule, FusionCallbacks >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; // Host reference bool passed = test::gemm::device::TestAllEVT<Gemm, test::gemm::device::HostDAGEVT<Gemm>>(); EXPECT_TRUE(passed); } #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_dag.cu/0
{ "file_path": "cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_dag.cu", "repo_id": "cutlass", "token_count": 2633 }
59
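Both tests above validate a fused epilogue visitor DAG against a host reference (HostEVTDAG / HostDAGEVT from gemm_testbed_3x_evt.hpp). Purely as a reading aid, the scalar sketch below spells out one plausible interpretation of the first formula in the file comment, D = beta * C + Graph(relu(alpha * acc + aux) + aux), treating the graph output as exactly that inner expression; it is not the library's reference implementation.

#include <algorithm>

// Illustrative scalar form of the EVTDAG epilogue exercised above:
//   D = beta * C + (relu(alpha * acc + aux) + aux)
inline float evt_dag_reference(float acc, float c, float aux,
                               float alpha, float beta) {
  float relu_term = std::max(alpha * acc + aux, 0.0f);  // relu node of the DAG
  float graph_out = relu_term + aux;                    // aux feeds a second DAG edge
  return beta * c + graph_out;                          // final linear combination with C
}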
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide Symm update interface */ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/blas3.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/reference/host/symm.h" #include "cutlass/util/reference/host/symm_complex.h" #include "testbed_utils.h" namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Symm> struct TestbedSymmUniversal { using ElementA = typename Symm::ElementA; using ElementB = typename Symm::ElementB; using ElementC = typename Symm::ElementC; using ElementAccumulator = typename Symm::ElementAccumulator; using ElementCompute = typename Symm::SymmKernel::Epilogue::OutputOp::ElementCompute; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<typename Symm::ElementA, typename Symm::LayoutA> tensor_A; cutlass::HostTensor<typename Symm::ElementB, typename Symm::LayoutB> tensor_B; cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> tensor_C; cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> tensor_D; cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> reference_D; // // Methods // TestbedSymmUniversal( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Symm::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { EXPECT_TRUE(false) << "Input distribution not implemented"; return false; } return true; } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_symmetric_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed, int 
mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Symm::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillSymmetricRandomUniform( view, seed, Symm::kFillModeA, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillSymmetricRandomGaussian( view, seed, Symm::kFillModeA, 0, 0.5, mantissa_in_bits); } else { EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented"; return false; } return true; } /// Initializes data structures void initialize(cutlass::gemm::GemmCoord problem_size) { // // Allocate the Symm workspace // if (Symm::kSideModeA == cutlass::SideMode::kLeft) { tensor_A.resize(cutlass::make_Coord(problem_size.m(),problem_size.m())); } else if (Symm::kSideModeA == cutlass::SideMode::kRight) { tensor_A.resize(cutlass::make_Coord(problem_size.n(),problem_size.n())); } tensor_B.resize(problem_size.mn()); tensor_C.resize(problem_size.mn()); tensor_D.resize(problem_size.mn()); reference_D.resize(problem_size.mn(), false); EXPECT_TRUE(initialize_symmetric_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits<typename Symm::ElementA>::bits)); EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018, cutlass::MantissaInBits<typename Symm::ElementB>::bits)); EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits<typename Symm::ElementC>::bits)); // It is possible to randomly initialize to all zeros, so override this with non-zeros // in the upper left corner of each operand. 
tensor_A.host_view().at({0, 0}) = typename Symm::ElementA(1); tensor_B.host_view().at({0, 0}) = typename Symm::ElementB(1); tensor_C.host_view().at({0, 0}) = typename Symm::ElementC(1); cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); tensor_A.sync_device(); tensor_B.sync_device(); tensor_C.sync_device(); tensor_D.sync_device(); } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { tensor_D.sync_host(); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); if (tensor_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); if (reference_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view()); bool passed = l2_norm < cutlass::MantissaInBits<typename Symm::ElementA>::error; return passed; } /// Verifies the result is a Symm bool verify( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { // // Verify // using HostReference = typename cutlass::platform::conditional< (cutlass::platform::is_same<typename Symm::ElementC, cutlass::complex<double> >::value || cutlass::platform::is_same<typename Symm::ElementC, cutlass::complex<float> >::value ), cutlass::reference::host::SymmComplex< typename Symm::ElementA, typename Symm::LayoutA, Symm::kSideModeA, Symm::kFillModeA, typename Symm::ElementB, typename Symm::LayoutB, typename Symm::ElementC, typename Symm::LayoutC, ElementCompute, ElementAccumulator, Symm::kBlasMode>, cutlass::reference::host::Symm< typename Symm::ElementA, typename Symm::LayoutA, Symm::kSideModeA, Symm::kFillModeA, typename Symm::ElementB, typename Symm::LayoutB, typename Symm::ElementC, typename Symm::LayoutC, ElementCompute, ElementAccumulator> >::type; HostReference reference_symm; reference_symm( problem_size, alpha, tensor_A.host_ref(), tensor_B.host_ref(), beta, tensor_C.host_ref(), reference_D.host_ref(), ElementAccumulator(0) ); return compare_reference(problem_size, alpha, beta); } /// Returns true if the CUDA device is sufficient to execute the kernel. bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Symm::SymmKernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerBlockOptin < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmUniversalMode mode, cutlass::gemm::GemmCoord problem_size, int batch_count = 1, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; } return true; } #if 0 std::cout << "[TestbedSymmUniversal::run()] problem(m, n, k): " << problem_size << " alpha: " << ElementCompute(alpha) << " beta: " << ElementCompute(beta) << std::endl; #endif this->initialize(problem_size); // // Initialize the Symm operator // int batch_stride_A; if (Symm::kSideModeA == cutlass::SideMode::kLeft) batch_stride_A = problem_size.m()*problem_size.m(); if (Symm::kSideModeA == cutlass::SideMode::kRight) batch_stride_A = problem_size.n()*problem_size.n(); typename Symm::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, tensor_A.device_data(), tensor_B.device_data(), tensor_C.device_data(), tensor_D.device_data(), batch_stride_A, problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), tensor_A.layout().stride(0), tensor_B.layout().stride(0), tensor_C.layout().stride(0), tensor_D.layout().stride(0) }; Symm symm_op; size_t workspace_size = Symm::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = symm_op.initialize(arguments, workspace.get()); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Run the Symm // status = symm_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Verify // bool passed = this->verify(problem_size, alpha, beta); //if (true) { if (!passed) { std::stringstream fname; fname << "error_" << (Symm::kBlasMode == cutlass::BlasMode::kSymmetric ? "symm_" : "hemm_" ) << "device_" << "fill_mode_a_" << (Symm::kSideModeA == cutlass::SideMode::kLeft ? "leftside_" : (Symm::kSideModeA == cutlass::SideMode::kRight ? "rightside_" : "invalid_")) << (Symm::kFillModeA == cutlass::FillMode::kLower ? "lower_" : (Symm::kFillModeA == cutlass::FillMode::kUpper ? "upper_" : "invalid_")) << "mnk_" << problem_size.m() << "x" << problem_size.n() << "x" << problem_size.k() << "_" << Symm::ThreadblockShape::kM << "x" << Symm::ThreadblockShape::kN << "x" << Symm::ThreadblockShape::kK << "_" << Symm::WarpShape::kM << "x" << Symm::WarpShape::kN << "x" << Symm::WarpShape::kK << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << problem_size << std::endl; results << "alpha: " << ElementCompute(alpha) << "\n" << "beta: " << ElementCompute(beta) << "\n" << "\nA:\n" << tensor_A.host_view() << "\n" << "\nB:\n" << tensor_B.host_view() << "\n" << "\nC:\n" << tensor_C.host_view() << "\n" << "\nD reference:\n" << reference_D.host_view() << "\n" << "\nD computed:\n" << tensor_D.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Symm> bool TestsymmUniversal( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmUniversalMode mode, int batch_count, double alpha = 1.0, double beta = 2.0) { bool passed = true; TestbedSymmUniversal<Symm> testbed; using ElementCompute = typename Symm::EpilogueOutputOp::ElementCompute; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); return passed; } template <typename Symm> bool TestAllSymmUniversal() { bool passed = true; int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Symm::ElementA>::value); int const kAlignment = cutlass::platform::is_same< typename Symm::OperatorClass, cutlass::arch::OpClassSimt>::value ? 
1 : 128 / kMinimumOperandElementSize; // int8_t gemm alignment constraints int const kAlignmentM = cutlass::platform::is_same<typename Symm::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Symm::ElementA, int8_t>::value && cutlass::platform::is_same<typename Symm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment; int const kAlignmentN = kAlignmentM; int const kAlignmentK = cutlass::platform::is_same<typename Symm::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Symm::ElementA, int8_t>::value && cutlass::platform::is_same<typename Symm::LayoutA, cutlass::layout::RowMajor>::value ? 4 : kAlignment; cutlass::gemm::GemmUniversalMode modes[] = { cutlass::gemm::GemmUniversalMode::kGemm, }; int problem_size_m[] = { kAlignmentK, Symm::ThreadblockShape::kK * Symm::kStages - kAlignmentK, Symm::ThreadblockShape::kK * Symm::kStages * 3 - kAlignmentK }; int problem_size_n[] = { kAlignmentN, 512 - 2*kAlignmentN }; int batch_counts[] = { // may be interpretted as batch count or split-K slices 1 // Just running one batch for now (removing 2, 3, 5, 7) }; double problem_alpha[] = { 1.0, 3.0 }; double problem_beta[] = { 0, 2.0 }; using ElementCompute = typename Symm::EpilogueOutputOp::ElementCompute; for (cutlass::gemm::GemmUniversalMode mode : modes) { for (int m : problem_size_m) { for (int n : problem_size_n) { for (int batch_count : batch_counts) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { int k = 0; if (Symm::kSideModeA == cutlass::SideMode::kLeft) k = m; else if (Symm::kSideModeA == cutlass::SideMode::kRight) k = n; if (mode == cutlass::gemm::GemmUniversalMode::kGemm || mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { #if 0 // skip very small K problems if (k / batch_count < 2 * Symm::ThreadblockShape::kK) { continue; } #endif } cutlass::gemm::GemmCoord problem_size(m, n, k); TestbedSymmUniversal<Symm> testbed; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); if (!passed) { return false; } } } } } } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace test /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/gemm/device/testbed_symm_universal.h/0
{ "file_path": "cutlass/test/unit/gemm/device/testbed_symm_universal.h", "repo_id": "cutlass", "token_count": 8569 }
60
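TestbedSymmUniversal checks device SYMM/HEMM results against cutlass::reference::host::Symm (or SymmComplex). For orientation, here is a plain scalar sketch of what a left-side, lower-fill, real-valued SYMM computes; the function name and row-major storage are hypothetical, and the real reference additionally covers the right-side, upper-fill, and Hermitian (HEMM) cases.

#include <vector>

// Hypothetical scalar reference for a left-side, lower-fill SYMM:
//   D = alpha * A * B + beta * C, where A is m-by-m symmetric and only its
//   lower triangle is treated as valid storage. All matrices are row-major.
void symm_left_lower_reference(int m, int n, float alpha,
                               std::vector<float> const &A,   // m x m
                               std::vector<float> const &B,   // m x n
                               float beta,
                               std::vector<float> const &C,   // m x n
                               std::vector<float> &D) {       // m x n
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      float accum = 0.0f;
      for (int k = 0; k < m; ++k) {
        // Read A(i, k) from the lower triangle, mirroring across the diagonal.
        float a = (k <= i) ? A[i * m + k] : A[k * m + i];
        accum += a * B[k * n + j];
      }
      D[i * n + j] = alpha * accum + beta * C[i * n + j];
    }
  }
}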
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #pragma once #include "cutlass/gemm/thread/mma.h" #include "cutlass/layout/vector.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" namespace test { namespace gemm { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Thread-level matrix multiply-accumulate template <typename Mma> void kernel( typename Mma::ElementC *D, typename Mma::ElementA const *A, typename Mma::ElementB const *B, typename Mma::ElementC const *C) { auto ptr_D = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> *>(D); auto ptr_A = reinterpret_cast<cutlass::Array<typename Mma::ElementA, Mma::Shape::kMK> const *>(A); auto ptr_B = reinterpret_cast<cutlass::Array<typename Mma::ElementB, Mma::Shape::kKN> const *>(B); auto ptr_C = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> const *>(C); Mma mma; auto a = *ptr_A; auto b = *ptr_B; auto c = *ptr_C; using Btype = typename Mma::ElementB; cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> d; mma(d, a, b, c); *ptr_D = d; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape, /// Data type of A elements typename ElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Data type of B elements typename ElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Element type of C matrix typename ElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC > struct Testbed { /// Thread-level matrix multiply-accumulate operator using Mma = cutlass::gemm::thread::Mma< Shape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC >; // // Data members // cutlass::HostTensor<ElementA, LayoutA> tensor_A; cutlass::HostTensor<ElementB, LayoutB> tensor_B; cutlass::HostTensor<ElementC, LayoutC> tensor_C; cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed; cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference; // // Methods // /// Allocates workspace in device memory Testbed() { tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK), false); tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN), false); tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); } /// Runs the test bool run() { // // initialize device memory // cutlass::reference::host::detail::RandomUniformFunc< ElementA > tfill_rand_func( 0, // seed 10, // max 0, // min 0); // bits after decimal cutlass::reference::host::detail::TensorFillRandomUniformFunc< ElementA, LayoutA > tfill_rand( tensor_A.host_view(), tfill_rand_func); for (auto i=0; i< Shape::kM; i++) for (auto j=0; j< Shape::kK; j++) tfill_rand(cutlass::make_Coord(i,j)); cutlass::reference::host::BlockFillSequential( tensor_B.host_data(), tensor_B.capacity(), ElementB(1), ElementB(2) ); cutlass::reference::host::TensorFill( tensor_C.host_view(), ElementC(0) ); cutlass::reference::host::TensorFill( tensor_D_computed.host_view(), ElementC(0) ); 
cutlass::reference::host::TensorFill( tensor_D_reference.host_view(), ElementC(0) ); // Host side call kernel<Mma>( tensor_D_computed.host_data(), tensor_A.host_data(), tensor_B.host_data(), tensor_C.host_data()); // // Reference implementation // cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementC, ElementC> reference_gemm; reference_gemm( {Shape::kM, Shape::kN, Shape::kK}, ElementC(1), tensor_A.host_ref(), tensor_B.host_ref(), ElementC(0), tensor_D_reference.host_ref() ); // // Verify equivalence // // compare bool passed = cutlass::reference::host::TensorEquals( tensor_D_computed.host_view(), tensor_D_reference.host_view() ); EXPECT_TRUE(passed) << "A:\n" << tensor_A.host_view() << "\n\n" << "B:\n" << tensor_B.host_view() << "\n\n" << "C:\n" << tensor_C.host_view() << "\n\n" << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" << "Computed:\n" << tensor_D_computed.host_view() << std::endl; return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace gemm } // namespace test
cutlass/test/unit/gemm/thread/host/testbed_host.h/0
{ "file_path": "cutlass/test/unit/gemm/thread/host/testbed_host.h", "repo_id": "cutlass", "token_count": 2631 }
61
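A test translation unit typically instantiates the testbed above once per tile shape and element/layout combination. The instantiation below is hypothetical: whether a particular Shape compiles depends on the cutlass::gemm::thread::Mma specializations available in the build, and the TEST macro assumes the gtest-based cutlass_unit_test.h harness used elsewhere in this test suite.

#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"

// Hypothetical host-side unit test driving the Testbed defined above.
TEST(SM50_host_thread_Mma, example_4x4x2_column_major_f32) {
  test::gemm::thread::Testbed<
    cutlass::gemm::GemmShape<4, 4, 2>,     // thread-level tile (M, N, K)
    float, cutlass::layout::ColumnMajor,   // A
    float, cutlass::layout::ColumnMajor,   // B
    float, cutlass::layout::ColumnMajor    // C and D
  >().run();
}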
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit testbed for kernel-level GEMM */ #pragma once #include <fstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/vector.h" #include "cutlass/numeric_types.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/gemm/threadblock/default_mma_core_simt.h" #include "cutlass/gemm/threadblock/default_mma_core_sm75.h" #include "cutlass/gemm/threadblock/default_mma_core_sm70.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h" #include "cutlass/cutlass.h" #include "cutlass/platform/platform.h" namespace test { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Mma> __global__ void kernel_mma(cutlass::gemm::GemmCoord problem_size, typename Mma::IteratorA::Params params_A, typename Mma::IteratorA::TensorRef ref_A, typename Mma::IteratorB::Params params_B, typename Mma::IteratorB::TensorRef ref_B, typename Mma::ElementC *ptr_C, typename Mma::LayoutC::Stride::Index ldc) { // Shared storage needed by threadblock-scoped matrix multiply-accumulate __shared__ typename Mma::SharedStorage shared_storage; // Compute threadblock location cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y), 0}; cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM, tb_tile_offset.k()}; cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(), tb_tile_offset.n() * Mma::Shape::kN}; // Compute position within threadblock int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A(params_A, ref_A.data(), {problem_size.m(), problem_size.k()}, tb_thread_id, tb_offset_A); typename Mma::IteratorB iterator_B(params_B, ref_B.data(), {problem_size.k(), problem_size.n()}, tb_thread_id, tb_offset_B); int warp_id = threadIdx.y; int lane_id = threadIdx.x; // Construct thread-scoped matrix multiply Mma mma(shared_storage, tb_thread_id, warp_id, threadIdx.x); typename Mma::FragmentC accum; accum.clear(); int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); // Output results typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, lane_id); iterator_C.add_tile_offset( {(tb_tile_offset.m() * Mma::WarpCount::kM) + (warp_id % Mma::WarpCount::kM), (tb_tile_offset.n() * Mma::WarpCount::kN) + (warp_id / Mma::WarpCount::kM)}); iterator_C.store(accum); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product template < /// Threadblock-level matrix multiply-accumulate typename MmaCore_, /// Number of stages int Stages = 2> struct Testbed { /// Threadblock-level GEMM implementation using MmaCore = MmaCore_; using ThreadblockShape = typename MmaCore::Shape; using WarpShape = typename MmaCore::WarpShape; using InstructionShape = typename MmaCore::InstructionShape; using ElementA = typename MmaCore::ElementA; using 
LayoutA = typename MmaCore::LayoutA; using ElementB = typename MmaCore::ElementB; using LayoutB = typename MmaCore::LayoutB; using ElementC = typename MmaCore::ElementC; using LayoutC = typename MmaCore::LayoutC; static const int kStages = Stages; // Define iterators over tiles from the A operand static const bool use_idp4a = cutlass::platform::is_same<ElementA, int8_t>::value && cutlass::platform::is_same<ElementB, int8_t>::value && cutlass::platform::is_same<typename MmaCore::OperatorClass, cutlass::arch::OpClassSimt>::value; static const bool transposeA = cutlass::platform::is_same< LayoutA, cutlass::layout::ColumnMajor >::value; static const bool transposeB = cutlass::platform::is_same< LayoutB, cutlass::layout::RowMajor >::value; using IteratorA = typename cutlass::platform::conditional< use_idp4a, cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA> , cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA> >::type; // Define iterators over tiles from the B operand using IteratorB = typename cutlass::platform::conditional< use_idp4a, cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB> , cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB> >::type; // Define MmaPipeline Single Stage using MmaPipelineSingleStage = cutlass::gemm::threadblock::MmaSingleStage< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC, typename MmaCore::MmaPolicy>; // Define MmaPipeline Two Stages using MmaPipelineTwoStages = cutlass::gemm::threadblock::MmaPipelined< typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC, typename MmaCore::MmaPolicy>; // Define the threadblock-scoped pipelined matrix multiply (Select between Single vs. Two stages) using Mma = typename cutlass::platform::conditional<(kStages==1), MmaPipelineSingleStage, MmaPipelineTwoStages>::type; // // Data members // cutlass::HostTensor<ElementA, LayoutA> matrix_A; cutlass::HostTensor<ElementB, LayoutB> matrix_B; cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed; cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference; cutlass::gemm::GemmCoord problem_size; float alpha, beta; // // Methods // /// Allocates workspace in device memory Testbed(int m, int n, int k, float alpha_, float beta_) : problem_size(m, n, k), alpha(alpha_), beta(beta_) { matrix_A.reset(cutlass::make_Coord(m, k)); matrix_B.reset(cutlass::make_Coord(k, n)); matrix_C_computed.reset(cutlass::make_Coord(m, n)); matrix_C_reference.reset(cutlass::make_Coord(m, n), false); } bool sufficient() { return true; } /// Runs the test bool run( dim3 grid, dim3 block, cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; } return true; } // // initialize device memory // if (init_A == cutlass::Distribution::Uniform) { int scope_max = 8; int scope_min = -8; if (cutlass::sizeof_bits<ElementA>::value == 4) { scope_max = 2; scope_min = -2; } else if (cutlass::sizeof_bits<ElementA>::value == 1) { scope_max = 2; scope_min = 0; } uint64_t seed = 7; cutlass::reference::host::TensorFillRandomUniform( matrix_A.host_view(), seed, scope_max, scope_min, 0); } else if (init_A == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential(matrix_A.host_data(), matrix_A.capacity()); } else if (init_A == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(matrix_A.host_view()); } else { return false; } if (init_B == cutlass::Distribution::Uniform) { int scope_max = 8; int scope_min = -8; if (cutlass::sizeof_bits<ElementB>::value == 4) { scope_max = 2; scope_min = -2; } else if (cutlass::sizeof_bits<ElementB>::value == 1) { scope_max = 2; scope_min = 0; } uint64_t seed = 7; cutlass::reference::host::TensorFillRandomUniform( matrix_B.host_view(), seed + 16, scope_max, scope_min, 0); } else if (init_B == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential(matrix_B.host_data(), matrix_B.capacity()); } else if (init_B == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(matrix_B.host_view()); } else { return false; } cutlass::reference::host::TensorFill(matrix_C_computed.host_view()); cutlass::reference::host::TensorFill(matrix_C_reference.host_view()); matrix_A.sync_device(); matrix_B.sync_device(); matrix_C_computed.sync_device(); typename IteratorA::Params params_A(matrix_A.layout()); typename IteratorB::Params params_B(matrix_B.layout()); test::gemm::threadblock::kernel_mma<Mma><<<grid, block>>>( problem_size, params_A, matrix_A.device_ref(), params_B, matrix_B.device_ref(), matrix_C_computed.device_data(), matrix_C_computed.layout().stride(0)); // // Check error code // cudaError_t result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result) << " on device " << GetCudaDevice(); matrix_C_computed.sync_host(); cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementC, ElementC, typename MmaCore::Operator> reference_gemm; reference_gemm( problem_size, ElementC(alpha), matrix_A.host_view(), matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view()); bool passed = cutlass::reference::host::TensorEquals( matrix_C_computed.host_view(), matrix_C_reference.host_view()); EXPECT_TRUE(passed) << "Failed on device " << GetCudaDevice(); if (!passed) { std::ofstream output("mma_pipelined_testbed_errors.txt"); output << "A:\n" << matrix_A.host_view() << "\n" << "B:\n" << matrix_B.host_view() << "\n" << "Reference:\n" << matrix_C_reference.host_view() << "\n" << "Computed:\n" << matrix_C_computed.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace test
cutlass/test/unit/gemm/threadblock/mma_pipelined_testbed.h/0
{ "file_path": "cutlass/test/unit/gemm/threadblock/mma_pipelined_testbed.h", "repo_id": "cutlass", "token_count": 5385 }
62
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for thread-level GEMM */ #pragma once #include "cutlass/array.h" namespace test { namespace nvrtc { namespace kernel { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Thread-level matrix multiply-accumulate template <typename Mma> __global__ void testbed_kernel( typename Mma::ElementC *D, typename Mma::ElementA const *A, typename Mma::ElementB const *B, typename Mma::ElementC const *C) { auto ptr_D = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> *>(D); auto ptr_A = reinterpret_cast<cutlass::Array<typename Mma::ElementA, Mma::Shape::kMK> const *>(A); auto ptr_B = reinterpret_cast<cutlass::Array<typename Mma::ElementB, Mma::Shape::kKN> const *>(B); auto ptr_C = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> const *>(C); Mma mma; auto a = *ptr_A; auto b = *ptr_B; auto c = *ptr_C; cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> d; mma(d, a, b, c); *ptr_D = d; } } } } }
cutlass/test/unit/nvrtc/kernel/thread/testbed_kernel.h/0
{ "file_path": "cutlass/test/unit/nvrtc/kernel/thread/testbed_kernel.h", "repo_id": "cutlass", "token_count": 861 }
63
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/core_io.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace gemm { namespace threadblock { /// template <typename Iterator> __global__ void kernel_gemm_threadblock_tensor_op_multiplicand_store( typename Iterator::TensorRef ref_output, typename Iterator::Element *input) { // Construct fragment typename Iterator::Fragment frag; frag.clear(); // each thread loads a fragment using AccessType = cutlass::Array<typename Iterator::Element, Iterator::ThreadMap::kElementsPerAccess>; int const kElementsPerAccess = Iterator::ThreadMap::kElementsPerAccess; int stride = Iterator::Shape::kContiguous; int warp_id = (threadIdx.x / 32); int lane_id = (threadIdx.x % 32); input += (lane_id % 8) * kElementsPerAccess + (lane_id / 8) * stride; input += (warp_id * Iterator::Shape::kStrided / Iterator::ThreadMap::Detail::kWarpCount) * stride; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Iterator::ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Iterator::ThreadMap::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int v = 0; v < Iterator::ThreadMap::kElementsPerAccess; ++v) { frag[v + Iterator::ThreadMap::kElementsPerAccess * (c + s * Iterator::ThreadMap::Iterations::kContiguous)] = input[v + c * 64 + s * Iterator::ThreadMap::Delta::kStrided * stride]; } } } // Use iterator to store results Iterator iter(ref_output, threadIdx.x); iter.store(frag); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Simple test environment template < typename Shape_, int WarpCount > class MultiplicandTileIteratorTestbed { public: // // Define iterator // using Shape = Shape_; using Element = cutlass::half_t; using Layout = cutlass::layout::TensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; static int const kAdvanceRank = 1; static int const kThreads = 32 * WarpCount; using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap< Shape, kThreads, cutlass::layout::PitchLinearShape<8, 4>, 128 / cutlass::sizeof_bits<Element>::value >; using Iterator = cutlass::transform::threadblock::RegularTileIterator< Shape, Element, Layout, kAdvanceRank, ThreadMap >; public: // // Members // cutlass::HostTensor<Element, Layout> destination_tensor; cutlass::HostTensor<Element, cutlass::layout::PitchLinear> source_tensor; public: MultiplicandTileIteratorTestbed(): destination_tensor({Shape::kContiguous, Shape::kStrided}), source_tensor({Shape::kContiguous, Shape::kStrided}) { } bool run() { cutlass::reference::host::BlockFillSequential( source_tensor.host_data(), source_tensor.capacity() ); cutlass::reference::host::BlockFillSequential( destination_tensor.host_data(), destination_tensor.capacity(), Element(0), Element(0) ); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); destination_tensor.sync_device(); source_tensor.sync_device(); test::gemm::threadblock::kernel_gemm_threadblock_tensor_op_multiplicand_store<Iterator><<< grid, block >>>( destination_tensor.device_ref(), source_tensor.device_data() ); cudaError_t result = 
cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << " - CUDA ERROR: " << cudaGetErrorString(result); destination_tensor.sync_host(); // // Verify // // Verify that its contents match the destination int errors = 0; for (int s = 0; s < Shape::kStrided; ++s) { for (int c = 0; c < Shape::kContiguous; ++c) { if (errors >= 10) { break; } Element expected = source_tensor.at({c, s}); Element got = destination_tensor.at({c, s}); bool passed = (expected == got); if (!passed) { ++errors; } } } EXPECT_EQ(errors, 0) << source_tensor.host_view() << "\n\n" << destination_tensor.host_view() << std::endl; return !errors; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x8_w1) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<64, 8>, 1>().run(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x16_w1) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<64, 16>, 1>().run(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x16_w2) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<64, 16>, 2>().run(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x8_w1) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<128, 8>, 1>().run(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 64x32_w4) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<64, 32>, 4>().run(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x32_w1) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<128, 32>, 1>().run(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 128x32_w4) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<128, 32>, 4>().run(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 256x32_w4) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<256, 32>, 4>().run(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_gemm_threadblock_tensor_op_multplicand_iterator_congruous_16b, 256x32_w8) { test::gemm::threadblock::MultiplicandTileIteratorTestbed< cutlass::layout::PitchLinearShape<256, 32>, 8>().run(); } 
/////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/transform/threadblock/regular_tile_iterator_tensor_op.cu/0
{ "file_path": "cutlass/test/unit/transform/threadblock/regular_tile_iterator_tensor_op.cu", "repo_id": "cutlass", "token_count": 3054 }
64
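The device kernel in this test hand-computes each lane's starting element in the canonical (non-swizzled) source tile before storing through the TensorOpMultiplicandCongruous iterator. The same addressing, restated as a standalone host helper for clarity, is shown below; the struct and function names are hypothetical and simply mirror the (lane_id % 8, lane_id / 8) arrangement of the 8x4 warp-raked thread map used above.

// Hypothetical helper mirroring the manual addressing in
// kernel_gemm_threadblock_tensor_op_multiplicand_store: 8 lanes are raked
// across the contiguous dimension (one 128-bit access each) and 4 lanes
// step down the strided dimension.
struct LaneOffset {
  int contiguous;   // element offset along the contiguous dimension
  int strided;      // offset along the strided dimension (rows of the tile)
};

inline LaneOffset initial_lane_offset(int lane_id, int elements_per_access) {
  LaneOffset offset;
  offset.contiguous = (lane_id % 8) * elements_per_access;  // 8 lanes across
  offset.strided    = (lane_id / 8);                        // 4 lanes down
  return offset;
}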
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines operations for all CONV operation kinds in CUTLASS Library. 
*/ #pragma once #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/kernel/default_conv2d_group_fprop.h" #include "cutlass/conv/kernel/default_depthwise_fprop.h" #include "cutlass/conv/kernel/default_conv2d_dgrad.h" #include "cutlass/conv/kernel/default_conv2d_wgrad.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/conv/device/direct_convolution.h" #include "cutlass/library/library.h" #include "library_internal.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/core_io.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class Conv2dOperationBase : public Operation { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = Operator::kIteratorAlgorithm; static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator; using OperatorArguments = typename Operator::Arguments; protected: /// ConvDescription description_; public: /// Constructor Conv2dOperationBase(char const *name = "unknown_conv2d") { description_.name = name; description_.provider = Provider::kCUTLASS; description_.kind = OperationKind::kConv2d; description_.conv_dim = Operator::kConvDim; description_.iterator_algorithm = IteratorAlgorithmMap<Operator::kIteratorAlgorithm>::kId; description_.tile_description.threadblock_shape = make_Coord( Operator::ThreadblockShape::kM, Operator::ThreadblockShape::kN, Operator::ThreadblockShape::kK); description_.tile_description.threadblock_stages = Operator::kStages; description_.tile_description.warp_count = make_Coord( Operator::UnderlyingKernel::WarpCount::kM, Operator::UnderlyingKernel::WarpCount::kN, Operator::UnderlyingKernel::WarpCount::kK); description_.tile_description.math_instruction.instruction_shape = make_Coord( Operator::InstructionShape::kM, Operator::InstructionShape::kN, Operator::InstructionShape::kK); description_.tile_description.math_instruction.element_accumulator = NumericTypeMap<ElementAccumulator>::kId; description_.tile_description.math_instruction.opcode_class = OpcodeClassMap<typename Operator::OperatorClass>::kId; description_.tile_description.math_instruction.math_operation = MathOperationMap<typename Operator::MathOperator>::kId; description_.tile_description.minimum_compute_capability = ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin; description_.tile_description.maximum_compute_capability = ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax; description_.A = make_TensorDescription<ElementA, LayoutA>(); description_.B = make_TensorDescription<ElementB, LayoutB>(); description_.C = make_TensorDescription<ElementC, LayoutC>(); description_.element_epilogue = NumericTypeMap<ElementCompute>::kId; 
// TODO: Add split k mode Serial and parallel to convolutions // description_.split_k_mode = Operator::kSplitK ? SplitKMode::kSerial : SplitKMode::kNone; } /// Returns the description of the GEMM operation virtual OperationDescription const & description() const { return description_; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // Conv2d library operation class for cutlass profiler // /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class Conv2dOperation : public Conv2dOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator; using OperatorArguments = typename Operator::Arguments; public: /// Constructor Conv2dOperation(char const *name = "unknown_conv2d_fprop") : Conv2dOperationBase<Operator_>(name) { this->description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &operator_args, Conv2dConfiguration const *configuration) { operator_args.problem_size = configuration->problem_size; operator_args.ref_A = { nullptr, LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.ref_B = { nullptr, LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.ref_C = { nullptr, LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.ref_D = { nullptr, LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.split_k_mode = configuration->split_k_mode; return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, ConvArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<ElementCompute const *>(arguments->alpha), *static_cast<ElementCompute const *>(arguments->beta) ); operator_args.output_op = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ typename Operator::EpilogueOutputOp::Params params( static_cast<ElementCompute const *>(arguments->alpha), static_cast<ElementCompute const *>(arguments->beta) ); operator_args.output_op = params; } else { return Status::kErrorInvalidProblem; } operator_args.ref_A.reset(static_cast<ElementA *>(const_cast<void *>(arguments->A))); operator_args.ref_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->B))); operator_args.ref_C.reset(static_cast<ElementC *>(const_cast<void *>(arguments->C))); operator_args.ref_D.reset(static_cast<ElementC *>(const_cast<void *>(arguments->D))); return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const 
*configuration_ptr, void const *arguments_ptr) const { Conv2dConfiguration const *configuration = static_cast<Conv2dConfiguration const *>(configuration_ptr); ConvArguments const *arguments = static_cast<ConvArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side workspace virtual uint64_t get_device_workspace_size( void const *configuration_ptr, void const *arguments_ptr = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<Conv2dConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } return Operator::get_workspace_size(args); } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<Conv2dConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; //std::cout << "initialize library::Conv2dOperation" << std::endl; //print_operator_args(args); return op->initialize(args, device_workspace, stream); } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<ConvArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = static_cast<Operator *>(host_workspace); status = op->update(args, device_workspace); if (status != Status::kSuccess) { return status; } //std::cout << "run library::Conv2dOperation" << std::endl; //print_operator_args(args); return op->run(stream); } /// Call print_operator_args from the Conv2dOperation::initialize() // to dump arguments passed on to cutlass operator for debugging void print_operator_args(OperatorArguments &operator_args) const { std::cout << "Conv2dOperation::OperatorArguments" << std::endl << " problem_size:" << std::endl << operator_args.problem_size << std::endl << " split_k_mode: " << (operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? 
"serial" : "parallel") << std::endl << " epilogue (alpha, beta): " << operator_args.output_op.alpha << ", " << operator_args.output_op.beta << std::endl << " ref_A (ptr, {stride}): " << operator_args.ref_A.data() << ", {" << operator_args.ref_A.stride(0) << ", " << operator_args.ref_A.stride(1) << ", " << operator_args.ref_A.stride(2) << "}" << std::endl << " ref_B (ptr, {stride}): " << operator_args.ref_B.data() << ", {" << operator_args.ref_B.stride(0) << ", " << operator_args.ref_B.stride(1) << ", " << operator_args.ref_B.stride(2) << "}" << std::endl << " ref_C (ptr, {stride}): " << operator_args.ref_C.data() << ", {" << operator_args.ref_C.stride(0) << ", " << operator_args.ref_C.stride(1) << ", " << operator_args.ref_C.stride(2) << "}" << std::endl << " ref_D (ptr, {stride}): " << operator_args.ref_D.data() << ", {" << operator_args.ref_D.stride(0) << ", " << operator_args.ref_D.stride(1) << ", " << operator_args.ref_D.stride(2) << "}" << std::endl; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // DirectConv2d library operation class for cutlass profiler // /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class DirectConv2dOperation : public Conv2dOperation<Operator_> { public: using Operator = Operator_; using Base = Conv2dOperation<Operator_>; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; static cutlass::conv::Operator const kConvolutionalOperator = Operator::kConvolutionalOperator; using OperatorArguments = typename Operator::Arguments; public: /// Constructor DirectConv2dOperation(char const *name = "unknown_direct)conv2d_fprop") : Conv2dOperation<Operator_>(name) { this->description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &operator_args, Conv2dConfiguration const *configuration) { operator_args.problem_size = configuration->problem_size; operator_args.ref_A = { nullptr, LayoutA::packed(implicit_gemm_tensor_a_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.ref_B = { nullptr, LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.ref_reordered_B = { nullptr, LayoutB::packed(implicit_gemm_tensor_b_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.ref_C = { nullptr, LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.ref_D = { nullptr, LayoutC::packed(implicit_gemm_tensor_c_extent(kConvolutionalOperator, configuration->problem_size)) }; operator_args.split_k_mode = configuration->split_k_mode; return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, ConvArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<ElementCompute const 
*>(arguments->alpha),
        *static_cast<ElementCompute const *>(arguments->beta)
      );

      operator_args.output_op = params;
    }
    else if (arguments->pointer_mode == ScalarPointerMode::kDevice) {
      typename Operator::EpilogueOutputOp::Params params(
        static_cast<ElementCompute const *>(arguments->alpha),
        static_cast<ElementCompute const *>(arguments->beta)
      );

      operator_args.output_op = params;
    }
    else {
      return Status::kErrorInvalidProblem;
    }

    operator_args.ref_A.reset(static_cast<ElementA *>(const_cast<void *>(arguments->A)));
    operator_args.ref_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->B)));
    operator_args.ref_C.reset(static_cast<ElementC *>(const_cast<void *>(arguments->C)));
    operator_args.ref_D.reset(static_cast<ElementC *>(const_cast<void *>(arguments->D)));
    operator_args.ref_reordered_B.reset(static_cast<ElementB *>(const_cast<void *>(arguments->reordered_B)));

    return Status::kSuccess;
  }

public:

  /// Returns success if the operation can proceed
  virtual Status can_implement(
    void const *configuration_ptr,
    void const *arguments_ptr) const {

    Conv2dConfiguration const *configuration =
      static_cast<Conv2dConfiguration const *>(configuration_ptr);

    ConvArguments const *arguments =
      static_cast<ConvArguments const *>(arguments_ptr);

    OperatorArguments args;

    Status status = construct_arguments_(args, configuration);

    if (status != Status::kSuccess) {
      return status;
    }

    status = update_arguments_(args, arguments);

    if (status != Status::kSuccess) {
      return status;
    }

    return Operator::can_implement(args);
  }

  /// Gets the host-side workspace
  virtual uint64_t get_host_workspace_size(
    void const *configuration) const {

    return sizeof(Operator);
  }

  /// Gets the device-side workspace
  virtual uint64_t get_device_workspace_size(
    void const *configuration_ptr,
    void const *arguments_ptr = nullptr) const {

    OperatorArguments args;

    Status status = construct_arguments_(
      args,
      static_cast<Conv2dConfiguration const *>(configuration_ptr));

    if (status != Status::kSuccess) {
      return 0;
    }

    return Operator::get_workspace_size(args);
  }

  /// Initializes the workspace
  virtual Status initialize(
    void const *configuration_ptr,
    void *host_workspace,
    void *device_workspace,
    cudaStream_t stream = nullptr) const {

    OperatorArguments args;

    Status status = construct_arguments_(
      args,
      static_cast<Conv2dConfiguration const *>(configuration_ptr));

    if (status != Status::kSuccess) {
      return status;
    }

    Operator *op = new (host_workspace) Operator;

    //std::cout << "initialize library::Conv2dOperation" << std::endl;
    //print_operator_args(args);
    return op->initialize(args, device_workspace, stream);
  }

  /// Runs the kernel
  virtual Status run(
    void const *arguments_ptr,
    void *host_workspace,
    void *device_workspace = nullptr,
    cudaStream_t stream = nullptr) const {

    OperatorArguments args;

    Status status = update_arguments_(
      args,
      static_cast<ConvArguments const *>(arguments_ptr));

    if (status != Status::kSuccess) {
      return status;
    }

    Operator *op = static_cast<Operator *>(host_workspace);

    status = op->update(args, device_workspace);

    if (status != Status::kSuccess) {
      return status;
    }

    //std::cout << "run library::Conv2dOperation" << std::endl;
    //print_operator_args(args);
    return op->run(stream);
  }

  /// Call print_operator_args from the Conv2dOperation::initialize()
  // to dump arguments passed on to cutlass operator for debugging
  void print_operator_args(OperatorArguments &operator_args) const {
    std::cout << "Conv2dOperation::OperatorArguments" << std::endl
              << " problem_size:" << std::endl
              << operator_args.problem_size << std::endl
              << " split_k_mode: " <<
(operator_args.split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial" : "parallel") << std::endl << " epilogue (alpha, beta): " << operator_args.output_op.alpha << ", " << operator_args.output_op.beta << std::endl << " ref_A (ptr, {stride}): " << operator_args.ref_A.data() << ", {" << operator_args.ref_A.stride(0) << ", " << operator_args.ref_A.stride(1) << ", " << operator_args.ref_A.stride(2) << "}" << std::endl << " ref_B (ptr, {stride}): " << operator_args.ref_B.data() << ", {" << operator_args.ref_B.stride(0) << ", " << operator_args.ref_B.stride(1) << ", " << operator_args.ref_B.stride(2) << "}" << std::endl << " ref_C (ptr, {stride}): " << operator_args.ref_C.data() << ", {" << operator_args.ref_C.stride(0) << ", " << operator_args.ref_C.stride(1) << ", " << operator_args.ref_C.stride(2) << "}" << std::endl << " ref_D (ptr, {stride}): " << operator_args.ref_D.data() << ", {" << operator_args.ref_D.stride(0) << ", " << operator_args.ref_D.stride(1) << ", " << operator_args.ref_D.stride(2) << "}" << std::endl; } }; } // namespace library } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
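// Illustrative driver sketch (not part of the original header): exercises the polymorphic
// library::Operation interface that Conv2dOperation implements above. The helper name
// run_conv2d_operation() and the assumption that the configuration and arguments are already
// populated are hypothetical; the cutlass_profiler performs the equivalent sequence internally.
#include <cstdint>
#include <vector>
#include <cuda_runtime.h>
#include "cutlass/library/library.h"

inline cutlass::Status run_conv2d_operation(
  cutlass::library::Operation const *op,               // e.g., an instance of Conv2dOperation<...>
  cutlass::library::Conv2dConfiguration const &config,
  cutlass::library::ConvArguments const &args,
  cudaStream_t stream = nullptr) {

  using cutlass::Status;

  // Reject unsupported problems early
  Status status = op->can_implement(&config, &args);
  if (status != Status::kSuccess) {
    return status;
  }

  // Host workspace holds the device-level Operator object (see get_host_workspace_size above)
  std::vector<uint8_t> host_workspace(op->get_host_workspace_size(&config));

  // Device workspace (e.g., for split-K partial results)
  void *device_workspace = nullptr;
  uint64_t device_workspace_size = op->get_device_workspace_size(&config, &args);
  if (device_workspace_size) {
    cudaMalloc(&device_workspace, device_workspace_size);
  }

  status = op->initialize(&config, host_workspace.data(), device_workspace, stream);
  if (status == Status::kSuccess) {
    status = op->run(&args, host_workspace.data(), device_workspace, stream);
  }

  if (device_workspace) {
    cudaFree(device_workspace);
  }
  return status;
}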
cutlass/tools/library/src/conv2d_operation.h/0
{ "file_path": "cutlass/tools/library/src/conv2d_operation.h", "repo_id": "cutlass", "token_count": 8020 }
65
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines operations for all CONV operation kinds in CUTLASS Library */ #pragma once #include <iostream> #include <sstream> #include <cstring> #include "cutlass/cutlass.h" #include "cutlass/library/library.h" #include "cutlass/library/manifest.h" #include "cutlass/library/util.h" #include "library_internal.h" #include "cutlass/conv/convolution.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/reference/device/convolution.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template < Provider kProvider, cutlass::conv::Operator ConvolutionalOperator, int ConvDim, typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename LayoutC_, typename ElementCompute_, typename ElementAccumulator_ = ElementCompute_, typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>, typename InnerProductOp_ = multiply_add<ElementAccumulator_> > struct ConvReferenceDispatcher; /// Dispatcher for Conv2d (partially specialized for kConvDim == 2) template < Provider kProvider, cutlass::conv::Operator kConvolutionalOperator, typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementCompute, typename ElementAccumulator, typename ConvertOp, typename InnerProductOp > struct ConvReferenceDispatcher< kProvider, kConvolutionalOperator, 2, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator, ConvertOp, InnerProductOp> { static Status dispatch( void 
const *configuration, ElementA *ptr_A, ElementB *ptr_B, ElementC *ptr_C, ElementC *ptr_D, ElementCompute alpha, ElementCompute beta, cudaStream_t stream = nullptr ) { Conv2dConfiguration const &config = *static_cast<Conv2dConfiguration const *>(configuration); // TODO: make below code more general. It is fixed for NHWC now. layout::TensorNHWC layout_a; layout::TensorNHWC layout_b; layout::TensorNHWC layout_c; layout_a.stride() = make_Coord(int32_t(config.stride_a[0]), int32_t(config.stride_a[1]), int32_t(config.stride_a[2])); layout_b.stride() = make_Coord(int32_t(config.stride_b[0]), int32_t(config.stride_b[1]), int32_t(config.stride_b[2])); layout_c.stride() = make_Coord(int32_t(config.stride_c[0]), int32_t(config.stride_c[1]), int32_t(config.stride_c[2])); if (kProvider == Provider::kReferenceHost) { cutlass::reference::host::Conv2d< ElementA, LayoutA, ElementB, LayoutB, ElementC , LayoutC, ElementCompute, ElementAccumulator, ElementC, ConvertOp, InnerProductOp >( kConvolutionalOperator, config.problem_size, {ptr_A, layout_a}, {ptr_B, layout_b}, {ptr_C, layout_c}, {ptr_D, layout_c}, alpha, beta ); return Status::kSuccess; } else if (kProvider == Provider::kReferenceDevice) { return cutlass::reference::device::Conv2d< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator, ConvertOp, InnerProductOp >( kConvolutionalOperator, config.problem_size, {ptr_A, layout_a}, {ptr_B, layout_b}, {ptr_C, layout_c}, {ptr_D, layout_c}, alpha, beta, stream ); } return Status::kErrorNotSupported; } }; /// Dispatcher for Conv3d (partially specialized for kConvDim == 3) template < Provider kProvider, cutlass::conv::Operator kConvolutionalOperator, typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementCompute, typename ElementAccumulator, typename ConvertOp, typename InnerProductOp > struct ConvReferenceDispatcher< kProvider, kConvolutionalOperator, 3, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator, ConvertOp, InnerProductOp> { static Status dispatch( void const *configuration, ElementA *ptr_A, ElementB *ptr_B, ElementC *ptr_C, ElementC *ptr_D, ElementCompute alpha, ElementCompute beta, cudaStream_t stream = nullptr ) { Conv3dConfiguration const &config = *static_cast<Conv3dConfiguration const *>(configuration); ConvKind const conv_kind = ConvKindMap<kConvolutionalOperator>::kId; if (kProvider == Provider::kReferenceHost) { cutlass::reference::host::Conv3d< ElementA, LayoutA, ElementB, LayoutB, ElementC , LayoutC, ElementCompute, ElementAccumulator, ConvertOp, InnerProductOp >( kConvolutionalOperator, config.problem_size, {ptr_A, config.layout_a(conv_kind)}, {ptr_B, config.layout_b(conv_kind)}, {ptr_C, config.layout_c(conv_kind)}, {ptr_D, config.layout_c(conv_kind)}, alpha, beta ); return Status::kSuccess; } else if (kProvider == Provider::kReferenceDevice) { return cutlass::reference::device::Conv3d< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator, ConvertOp, InnerProductOp >( kConvolutionalOperator, config.problem_size, {ptr_A, config.layout_a(conv_kind)}, {ptr_B, config.layout_b(conv_kind)}, {ptr_C, config.layout_c(conv_kind)}, {ptr_D, config.layout_c(conv_kind)}, alpha, beta, stream ); } return Status::kErrorNotSupported; } }; } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// template < Provider Provider_, cutlass::conv::Operator 
ConvolutionalOperator, int ConvDim, typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename LayoutC_, typename ElementCompute_, typename ElementAccumulator_ = ElementCompute_, typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>, typename InnerProductOp_ = multiply_add<ElementAccumulator_> > class ConvReferenceOperation : public Operation { public: static Provider const kProvider = Provider_; static cutlass::conv::Operator const kConvolutionalOperator = ConvolutionalOperator; static int const kConvDim = ConvDim; using ElementA = ElementA_; using LayoutA = LayoutA_; using ElementB = ElementB_; using LayoutB = LayoutB_; using ElementC = ElementC_; using LayoutC = LayoutC_; using ElementCompute = ElementCompute_; using ElementAccumulator = ElementAccumulator_; using ConvertOp = ConvertOp_; using InnerProductOp = InnerProductOp_; protected: /// Storage for the name string std::string name_; /// ConvDescription description_; public: /// Constructor ConvReferenceOperation() { // Basic information description_.provider = kProvider; description_.kind = (kConvDim == 2 ? OperationKind::kConv2d : OperationKind::kConv3d); description_.conv_kind = ConvKindMap<kConvolutionalOperator>::kId; description_.conv_dim = kConvDim; // Tensor description description_.A = make_TensorDescription<ElementA, LayoutA>(); description_.B = make_TensorDescription<ElementB, LayoutB>(); description_.C = make_TensorDescription<ElementC, LayoutC>(); // Epilogue compute and accumulator type description description_.element_epilogue = NumericTypeMap<ElementCompute>::kId; description_.tile_description.math_instruction.element_accumulator = NumericTypeMap<ElementAccumulator>::kId; // Iterator algorithm for convolution reference description_.iterator_algorithm = IteratorAlgorithmID::kNone; // Compute capability for convolution reference description_.tile_description.minimum_compute_capability = (kProvider == Provider::kReferenceDevice ? 
50 : 0); description_.tile_description.maximum_compute_capability = 1024; // Procedural name std::stringstream ss; ss << "conv" << kConvDim << "d_" << to_string(description_.conv_kind) << "_reference_" << to_string(description_.provider) << "_" << to_string(description_.A.element) << to_string(description_.A.layout) << "_" << to_string(description_.B.element) << to_string(description_.B.layout) << "_" << to_string(description_.C.element) << to_string(description_.C.layout) << "_" << to_string(description_.tile_description.math_instruction.element_accumulator); name_ = ss.str(); description_.name = name_.c_str(); // Epilogue compute and accumulator type description description_.element_epilogue = NumericTypeMap<ElementCompute>::kId; description_.tile_description.math_instruction.element_accumulator = NumericTypeMap<ElementAccumulator>::kId; } /// Returns the description of the GEMM operation virtual OperationDescription const & description() const { return description_; } virtual Status can_implement( void const *configuration, void const *arguments) const { return Status::kSuccess; } virtual uint64_t get_host_workspace_size( void const *configuration) const { switch (kConvDim) { case 2: return sizeof(Conv2dConfiguration); case 3: return sizeof(Conv3dConfiguration); default: break; } return 0; } virtual uint64_t get_device_workspace_size( void const *configuration, void const *arguments = nullptr) const { return 0; } virtual Status initialize( void const *configuration, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { std::memcpy(host_workspace, configuration, get_host_workspace_size(configuration)); return Status::kSuccess; } virtual Status run( void const *arguments, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { ConvArguments const &args = *static_cast<ConvArguments const *>(arguments); ElementCompute alpha; ElementCompute beta; alpha = *static_cast<ElementCompute const *>(args.alpha); beta = *static_cast<ElementCompute const *>(args.beta); // TODO - respect pointer mode // Invoke 2D or 3D convolution return detail::ConvReferenceDispatcher< kProvider, kConvolutionalOperator, kConvDim, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator, ConvertOp, InnerProductOp >::dispatch( host_workspace, static_cast<ElementA *>(const_cast<void *>(args.A)), static_cast<ElementB *>(const_cast<void *>(args.B)), static_cast<ElementC *>(const_cast<void *>(args.C)), static_cast<ElementC *>(args.D), alpha, beta, stream ); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Constructs Fprop reference operators. 
template < int kConvDim, typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename LayoutC_, typename ElementCompute_, typename ElementAccumulator_ = ElementCompute_, typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>, typename InnerProductOp_ = multiply_add<ElementAccumulator_> > void make_conv_fprop(Manifest &manifest) { manifest.append(new ConvReferenceOperation< Provider::kReferenceHost, cutlass::conv::Operator::kFprop, kConvDim, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementCompute_, ElementAccumulator_, ConvertOp_, InnerProductOp_ >); manifest.append(new ConvReferenceOperation< Provider::kReferenceDevice, cutlass::conv::Operator::kFprop, kConvDim, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementCompute_, ElementAccumulator_, ConvertOp_, InnerProductOp_ >); } /// Constructs Dgrad and Wgrad reference operators. template < int kConvDim, typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename LayoutC_, typename ElementCompute_, typename ElementAccumulator_ = ElementCompute_, typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>, typename InnerProductOp_ = multiply_add<ElementAccumulator_> > void make_conv_backwards(Manifest &manifest) { manifest.append(new ConvReferenceOperation< Provider::kReferenceHost, cutlass::conv::Operator::kDgrad, kConvDim, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementCompute_, ElementAccumulator_, ConvertOp_, InnerProductOp_ >); manifest.append(new ConvReferenceOperation< Provider::kReferenceDevice, cutlass::conv::Operator::kDgrad, kConvDim, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementCompute_, ElementAccumulator_, ConvertOp_, InnerProductOp_ >); manifest.append(new ConvReferenceOperation< Provider::kReferenceHost, cutlass::conv::Operator::kWgrad, kConvDim, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementCompute_, ElementAccumulator_, ConvertOp_, InnerProductOp_ >); manifest.append(new ConvReferenceOperation< Provider::kReferenceDevice, cutlass::conv::Operator::kWgrad, kConvDim, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementCompute_, ElementAccumulator_, ConvertOp_, InnerProductOp_ >); } /// Six operators for the price of one. template < int kConvDim, typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename LayoutC_, typename ElementCompute_, typename ElementAccumulator_ = ElementCompute_, typename ConvertOp_ = NumericConverter<ElementC_, ElementCompute_>, typename InnerProductOp_ = multiply_add<ElementAccumulator_> > void make_conv_all(Manifest &manifest) { make_conv_fprop< kConvDim, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementCompute_, ElementAccumulator_, ConvertOp_, InnerProductOp_ >(manifest); make_conv_backwards< kConvDim, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementCompute_, ElementAccumulator_, ConvertOp_, InnerProductOp_ >(manifest); } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
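// Illustrative registration sketch (not part of the original header): shows how the helpers
// above might be used to populate a Manifest with host and device reference operators for one
// example type combination. The function name and the chosen f16/NHWC combination are
// assumptions; the shipped reference manifests are populated elsewhere in tools/library.
inline void register_example_conv2d_reference(cutlass::library::Manifest &manifest) {

  // Appends six operators: fprop, dgrad, and wgrad, each with a host and a device provider.
  cutlass::library::make_conv_all<
    2,                                                // kConvDim
    cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementA, LayoutA
    cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementB, LayoutB
    cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementC, LayoutC
    float,                                            // ElementCompute
    float                                             // ElementAccumulator
  >(manifest);
}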
cutlass/tools/library/src/reference/conv_reference_operation.h/0
{ "file_path": "cutlass/tools/library/src/reference/conv_reference_operation.h", "repo_id": "cutlass", "token_count": 6444 }
66
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> // Profiler includes #include "cutlass/profiler/cutlass_profiler.h" #include "cutlass/profiler/gemm_operation_profiler.h" #include "cutlass/profiler/rank_k_operation_profiler.h" #include "cutlass/profiler/rank_2k_operation_profiler.h" #include "cutlass/profiler/trmm_operation_profiler.h" #include "cutlass/profiler/symm_operation_profiler.h" #include "cutlass/profiler/conv2d_operation_profiler.h" #include "cutlass/profiler/conv3d_operation_profiler.h" #include "cutlass/profiler/sparse_gemm_operation_profiler.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// CutlassProfiler::CutlassProfiler( Options const &options ): options_(options) { operation_profilers_.emplace_back(new GemmOperationProfiler(options)); operation_profilers_.emplace_back(new SparseGemmOperationProfiler(options)); operation_profilers_.emplace_back(new Conv2dOperationProfiler(options)); operation_profilers_.emplace_back(new Conv3dOperationProfiler(options)); operation_profilers_.emplace_back(new RankKOperationProfiler(options)); operation_profilers_.emplace_back(new Rank2KOperationProfiler(options)); operation_profilers_.emplace_back(new TrmmOperationProfiler(options)); operation_profilers_.emplace_back(new SymmOperationProfiler(options)); } CutlassProfiler::~CutlassProfiler() { } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Execute the program int CutlassProfiler::operator()() { if (options_.cmdline.num_naked_args() > 0) { std::cerr << 
"Unknown args: \n"; options_.cmdline.print_naked_args(std::cerr); std::cerr << "\n\n\n"; print_usage_(std::cout); return 1; } if (options_.about.help) { if (options_.operation_kind == library::OperationKind::kInvalid) { print_usage_(std::cout); } else { for (auto & profiler : operation_profilers_) { if (profiler->kind() == options_.operation_kind) { profiler->print_usage(std::cout); profiler->print_examples(std::cout); return 0; } } } return 0; } else if (options_.about.version) { options_.about.print_version(std::cout); std::cout << std::endl; return 0; } else if (options_.about.device_info) { options_.device.print_device_info(std::cout); return 0; } if (options_.execution_mode == ExecutionMode::kProfile || options_.execution_mode == ExecutionMode::kDryRun || options_.execution_mode == ExecutionMode::kTrace) { // Profiles all operations return profile_(); } else if (options_.execution_mode == ExecutionMode::kEnumerate) { // Enumerates all operations enumerate_(); } return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Enumerates all operations void CutlassProfiler::enumerate_() { } /// Profiles all operations int CutlassProfiler::profile_() { int result = 0; DeviceContext device_context; // For all profilers for (auto & profiler : operation_profilers_) { if (options_.operation_kind == library::OperationKind::kInvalid || options_.operation_kind == profiler->kind()) { result = profiler->profile_all(options_, library::Singleton::get().manifest, device_context); if (result) { return result; } } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Prints all options void CutlassProfiler::print_usage_(std::ostream &out) { options_.print_usage(out); out << "\nOperations:\n\n"; // For all profilers for (auto & profiler : operation_profilers_) { std::string kind_str = library::to_string(profiler->kind()); size_t kAlignment = 40; size_t columns = 0; if (kind_str.size() < kAlignment) { columns = kAlignment - kind_str.size(); } out << " " << kind_str << std::string(columns, ' ') << profiler->description() << "\n"; } out << "\n\nFor details about a particular function, specify the function name with --help.\n\nExample:\n\n" << " $ cutlass_profiler --operation=Gemm --help\n\n" << " $ cutlass_profiler --operation=RankK --help\n\n" << " $ cutlass_profiler --operation=Trmm --help\n\n" << " $ cutlass_profiler --operation=Symm --help\n\n" << " $ cutlass_profiler --operation=Conv3d --help\n\n" << " $ cutlass_profiler --operation=Conv2d --help\n\n" << " $ cutlass_profiler --operation=SparseGemm --help\n\n" ; } /// Prints usage void CutlassProfiler::print_options_(std::ostream &out) { options_.print_options(out); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Initializes the CUDA device void CutlassProfiler::initialize_device_() { cudaError_t result = cudaSetDevice(options_.device.device); if (result != cudaSuccess) { std::cerr << "Failed to set device."; throw std::runtime_error("Failed to set device"); } } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/profiler/src/cutlass_profiler.cu/0
{ "file_path": "cutlass/tools/profiler/src/cutlass_profiler.cu", "repo_id": "cutlass", "token_count": 2240 }
67
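// Illustrative driver sketch for the CutlassProfiler defined in cutlass_profiler.cu above. The
// shipped cutlass_profiler executable provides its own equivalent entry point; the Options and
// CommandLine construction below is a minimal sketch under that assumption, not the original
// main().
#include "cutlass/profiler/options.h"
#include "cutlass/profiler/cutlass_profiler.h"

int main(int argc, char const *argv[]) {
  cutlass::CommandLine cmdline(argc, argv);          // parses --operation=, --m=, --providers=, ...
  cutlass::profiler::Options options(cmdline);       // materializes profiler options from the command line
  cutlass::profiler::CutlassProfiler profiler(options);
  return profiler();                                 // enumerates or profiles based on execution mode
}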
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "cutlass/profiler/cublas_helpers.h" #include "cutlass/profiler/trmm_operation_profiler.h" #include "cutlass/profiler/gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor TrmmOperationProfiler::TrmmOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kTrmm, { {ArgumentTypeID::kEnumerated, {"trmm_kind"}, "Variant of TRMM (universal)"}, {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the TRMM problem space"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the TRMM problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for TRMM (left, right)"}, {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for TRMM (lower, upper)"}, {ArgumentTypeID::kEnumerated, {"diag_type"}, "Diag Type for TRMM (nonunit, unit)"}, {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, {ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D operand"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of TRMMs computed in one batch"}, }, { 
library::Provider::kCUBLAS} ) { description_ = " Triangular Matrix-Multiplication. D = alpha * A * B or alpha * B * A"; } /// Destructor TrmmOperationProfiler::~TrmmOperationProfiler() { } /// Prints usage statement for the math function void TrmmOperationProfiler::print_usage(std::ostream &out) const { out << "TRMM" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void TrmmOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular problem size:\n" << " $ cutlass_profiler --operation=Trmm --n=1024 --m=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=Trmm --n=1024:4096:256 --m=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=Trmm --accumulator-type=f16,f32\n\n" << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n" << " $ cutlass_profiler --operation=Trmm --A=f16:column or --A=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=Trmm --dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=Trmm --dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=Trmm --dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" << " $ cutlass_profiler --operation=Trmm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to trmm kernels with a quick functional test and save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=Trmm \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif Status TrmmOperationProfiler::TrmmProblem::parse( library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!arg_as_int(this->m, "m", problem_space, problem)) { // default value this->m = 1024; } if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { // default value this->batch_count = 1; } if (this->split_k_slices > 1 && this->batch_count > 1) { // At least one of these must be one return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( this->alpha, 
operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } if (operation_desc.side_mode == SideMode::kLeft) { this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->m), int(this->m)}).front(); } else if (operation_desc.side_mode == SideMode::kRight) { this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->n), int(this->n)}).front(); } this->ldb = DeviceAllocation::get_packed_layout( operation_desc.B.layout, {int(this->m), int(this->n)}).front(); this->ldd = DeviceAllocation::get_packed_layout( operation_desc.D.layout, {int(this->m), int(this->n)}).front(); return Status::kSuccess; } /// Initializes a performance result void TrmmOperationProfiler::TrmmProblem::initialize_result( PerformanceResult &result, library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "trmm_kind", problem_space, library::to_string(operation_desc.trmm_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode)); set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); set_argument(result, "diag_type", problem_space, library::to_string(operation_desc.diag_type)); set_argument(result, "B", problem_space, std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); set_argument(result, "D", problem_space, std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout)); set_argument(result, "m", problem_space, m); set_argument(result, "n", problem_space, n); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status TrmmOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::TrmmDescription const &operation_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (operation_desc.trmm_kind != library::TrmmKind::kUniversal) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } trmm_workspace_.configuration.problem_size.m() = int(problem_.m); trmm_workspace_.configuration.problem_size.n() = int(problem_.n); trmm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft) ? 
int(problem_.m) : int(problem_.n); trmm_workspace_.configuration.lda = problem_.lda; trmm_workspace_.configuration.ldb = problem_.ldb; trmm_workspace_.configuration.ldd = problem_.ldd; //trmm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); trmm_workspace_.configuration.batch_count = int(problem_.split_k_slices); trmm_workspace_.arguments.A = nullptr; trmm_workspace_.arguments.B = nullptr; trmm_workspace_.arguments.D = nullptr; trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&trmm_workspace_.configuration, &trmm_workspace_.arguments); } /// Initializes the performance result void TrmmOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::TrmmDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); if (operation_desc.side_mode == SideMode::kLeft) { // Input bytes read and Output bytes written for the trmm problem result.bytes = // Half matrix including the diagonal will have (M*(M+1))/2 elements int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * (problem_.m + 1) / 2 + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n; } else if (operation_desc.side_mode == SideMode::kRight) { // Input bytes read and Output bytes written for the trmm problem result.bytes = // Half matrix including the diagonal will have (N*(N+1))/2 elements int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.n / 8) * (problem_.n + 1) / 2 + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n + int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n; } // FLOPs = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero result.flops = problem_.m * (problem_.m + 1) * problem_.n; result.runtime = 0; // complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: result.flops *= 4; break; case library::MathOperationID::kMultiplyAddComplexFastF32: result.flops *= 4; break; default: break; } } /// Initializes workspace Status TrmmOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::TrmmDescription const &operation_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { int seed_shift = 0; if (operation_desc.side_mode == SideMode::kLeft) { trmm_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.m), int(problem_.m)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); } else if (operation_desc.side_mode == SideMode::kRight) { trmm_workspace_.A = 
device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); } trmm_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldb)}, 1, // batch_count seed_shift++ ); trmm_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.D.element, operation_desc.D.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldd)} ); trmm_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.D.element, operation_desc.D.layout, {int(problem_.m), int(problem_.n)}, {int(problem_.ldd)} ); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size(&trmm_workspace_.configuration); trmm_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size(&trmm_workspace_.configuration); trmm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &trmm_workspace_.configuration, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kTrmm; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool TrmmOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Computed->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &trmm_workspace_.arguments, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUBLAS if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { // Guard against unsupported 
cases auto const & trmm_desc = static_cast<library::TrmmDescription const &>(operation->description()); if (cublas_satisfies(trmm_desc) == Status::kSuccess) { // call cublas verification if supported verify_with_cublas_( options, report, device_context, operation, problem_space, problem); } else { // set verification map for cublas to not supported results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUBLAS // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool TrmmOperationProfiler::verify_with_cublas_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { #if CUTLASS_ENABLE_CUBLAS library::TrmmDescription const &trmm_desc = static_cast<library::TrmmDescription const &>(operation->description()); // // Construct cuBLAS operators // CublasCreate handle; cublasStatus_t status = handle.get_cublas_create_status(); if (status != CUBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Initialize state // try { // // Construct dispatcher to cublas<t>Trmm() // // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Reference->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; detail::cublasTrmmDispatcher trmm_op( trmm_desc, trmm_workspace_.configuration, trmm_workspace_.arguments ); if (trmm_op.status != Status::kSuccess) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; return true; } results_.back().status = Status::kSuccess; status = trmm_op(handle); // Handle errors if (status != CUBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( options, *trmm_workspace_.Computed, *trmm_workspace_.Reference ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { save_workspace( device_context, options, trmm_desc, library::Provider::kCUTLASS, library::Provider::kCUBLAS); } } catch (...) 
{ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; } #endif // Return true means continue profiling return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool TrmmOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing TRMM arguments trmm_workspace_.arguments.A = trmm_workspace_.A->data(); trmm_workspace_.arguments.B = trmm_workspace_.B->data(); trmm_workspace_.arguments.D = trmm_workspace_.Computed->data(); trmm_workspace_.arguments.alpha = problem_.alpha.data(); trmm_workspace_.arguments.beta = problem_.beta.data(); trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &trmm_workspace_.arguments, trmm_workspace_.host_workspace.data(), trmm_workspace_.device_workspace.data() ); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/profiler/src/trmm_operation_profiler.cu/0
{ "file_path": "cutlass/tools/profiler/src/trmm_operation_profiler.cu", "repo_id": "cutlass", "token_count": 8624 }
68
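The byte and FLOP model in TrmmOperationProfiler::initialize_result_ above is easy to check in isolation. The sketch below is a standalone, host-only illustration of that model for a left-side, real-valued TRMM with beta == 0; the problem size and the 16-bit element widths are illustrative assumptions, not values taken from the profiler.

#include <cstdint>
#include <iostream>

int main() {
  int64_t M = 1024, N = 512;                      // assumed problem size
  int64_t bits_A = 16, bits_B = 16, bits_D = 16;  // assumed fp16 operands and output

  // Triangular A (diagonal included) holds M*(M+1)/2 elements; B is read and D
  // is written densely, mirroring the kLeft branch of initialize_result_.
  int64_t bytes =
      (bits_A * M / 8) * (M + 1) / 2 +
      (bits_B * M / 8) * N +
      (bits_D * M / 8) * N;

  // FLOPs = 2 * (M * (M+1)/2 * N) = M * (M+1) * N for real-valued math;
  // complex multiply-add kernels scale this count by a further factor of 4.
  int64_t flops = M * (M + 1) * N;

  std::cout << "bytes: " << bytes << "  flops: " << flops << std::endl;
  return 0;
}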
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /*! \file \brief This header contains a class to parametrize a statistical distribution function. 
*/ #include <ostream> namespace cutlass { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Distribution type struct Distribution { /// Variant types enum Kind { Invalid, Uniform, Gaussian, Identity, Sequential, AllZeros, AllOnes }; /// Distribution state union { /// Uniform distribution struct { double min; double max; } uniform; /// Gaussian distribution struct { double mean; double stddev; double pnz; double pnzA; double pnzB; double pnzC; } gaussian; /// Elements are linear combination of row and column index struct { double start; double delta; } sequential; }; /// Active variant kind Kind kind; /// Random values are cast to integer after scaling by this power of two int int_scale; // // Methods // Distribution() : kind(Invalid), int_scale(0) {} /// Configures distribution as uniform random Distribution &set_uniform(double _min, double _max, int _int_scale = 0) { kind = Uniform; uniform.min = _min; uniform.max = _max; int_scale = _int_scale; return *this; } /// Configures distribution as Gaussian distribution Distribution &set_gaussian(double _mean, double _stddev, int _int_scale = 0, double _pnz = 100.0) { kind = Gaussian; gaussian.mean = _mean; gaussian.stddev = _stddev; gaussian.pnz = _pnz; int_scale = _int_scale; return *this; } /// Sets identity Distribution &set_identity() { kind = Identity; return *this; } /// Sets sequential Distribution &set_sequential(double start, double delta, int _int_scale = 0) { kind = Sequential; sequential.start = start; sequential.delta = delta; int_scale = _int_scale; return *this; } }; } // namespace cutlass //////////////////////////////////////////////////////////////////////////////////////////////////// /// Prints a Distribution to ostream inline std::ostream &operator<<(std::ostream &out, cutlass::Distribution const &dist) { switch (dist.kind) { case cutlass::Distribution::Uniform: out << "uniform, min: " << dist.uniform.min << ", max: " << dist.uniform.max; break; case cutlass::Distribution::Gaussian: out << "gaussian, mean: " << dist.gaussian.mean << ", stddev: " << dist.gaussian.stddev << ", pnzA: " << dist.gaussian.pnzA << ", pnzB: " << dist.gaussian.pnzB << ", pnzC: " << dist.gaussian.pnzC; break; case cutlass::Distribution::Identity: out << "identity"; break; case cutlass::Distribution::Sequential: out << "sequential"; break; default: out << "unknown"; } out << ", int_scale: " << dist.int_scale; return out; } ////////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/util/include/cutlass/util/distribution.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/distribution.h", "repo_id": "cutlass", "token_count": 1485 }
69
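A minimal usage sketch for the Distribution helper above. The include path follows the file location listed in the metadata, and the explicit pnzA/pnzB/pnzC assignments are only there because set_gaussian() populates pnz but leaves the per-operand fields untouched, which the stream operator prints.

#include <iostream>
#include "cutlass/util/distribution.h"

int main() {
  cutlass::Distribution dist_A;
  cutlass::Distribution dist_B;

  // Uniform values in [-4, 4]; int_scale = 0 leaves values unscaled.
  dist_A.set_uniform(-4.0, 4.0, /*_int_scale=*/0);

  // Gaussian with mean 0 and stddev 2. set_gaussian() sets 'pnz' only, so the
  // per-operand percentages are filled in explicitly before printing.
  dist_B.set_gaussian(0.0, 2.0, /*_int_scale=*/0, /*_pnz=*/100.0);
  dist_B.gaussian.pnzA = dist_B.gaussian.pnzB = dist_B.gaussian.pnzC = 100.0;

  std::cout << "A: " << dist_A << "\n"
            << "B: " << dist_B << std::endl;
  return 0;
}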
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for complex-valued GEMM in device code. 
*/ #pragma once #include "cutlass/coord.h" #include "cutlass/complex.h" #include "cutlass/matrix_coord.h" #include "cutlass/numeric_types.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_ref_planar_complex.h" #include "cutlass/tensor_view.h" #include "cutlass/gemm/gemm.h" namespace cutlass { namespace reference { namespace device { //////////////////////////////////////////////////////////////////////////////////////////////////// namespace kernel { //////////////////////////////////////////////////////////////////////////////////////////////////// static int const kGemmPlanarComplexBlockSize = 4; template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ScalarType, typename ComputeType, typename ConvertOp = NumericConverter<ElementC, ScalarType>, typename InnerProductOp = multiply_add<complex<ComputeType>> > __global__ void GemmPlanarComplex( gemm::GemmCoord problem_size, complex<ScalarType> alpha, TensorRefPlanarComplex<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, TensorRefPlanarComplex<ElementB, LayoutB> tensor_b, ComplexTransform transform_b, complex<ScalarType> beta, TensorRefPlanarComplex<ElementC, LayoutC> tensor_c, TensorRefPlanarComplex<ElementC, LayoutC> tensor_d, complex<ComputeType> initial_accum) { int const kMblock = kGemmPlanarComplexBlockSize; int const kNblock = kGemmPlanarComplexBlockSize; using ComplexA = typename TensorRefPlanarComplex<ElementA, LayoutA>::ComplexElement; using ComplexB = typename TensorRefPlanarComplex<ElementB, LayoutB>::ComplexElement; using ComplexC = typename TensorRefPlanarComplex<ElementC, LayoutC>::ComplexElement; // Note: batch is ignored. int const M = problem_size.m(); int const N = problem_size.n(); int const K = problem_size.k(); ConvertOp convert_op; InnerProductOp inner_product_op; complex<ComputeType> accum[kMblock][kNblock]; int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock; int col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock; CUTLASS_PRAGMA_UNROLL for (int j = 0; j < kNblock; j++) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kMblock; i++) { accum[i][j] = initial_accum; } } CUTLASS_PRAGMA_NO_UNROLL for (int k_block = 0; k_block < K; ++k_block) { CUTLASS_PRAGMA_UNROLL for (int j = 0; j < kNblock; j++) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kMblock; i++) { int row = row_block + i; int col = col_block + j; if (row < M && col < N) { ComplexA a_ik = tensor_a.at(MatrixCoord(row, k_block)); ComplexB b_kj = tensor_b.at(MatrixCoord(k_block, col)); complex<ComputeType> a = complex<ComputeType>{ ComputeType(a_ik.real()), ComputeType(a_ik.imag()) }; complex<ComputeType> b = complex<ComputeType>{ ComputeType(b_kj.real()), ComputeType(b_kj.imag()) }; if (transform_a == ComplexTransform::kConjugate) { a = conj(a); } if (transform_b == ComplexTransform::kConjugate) { b = conj(b); } accum[i][j] = inner_product_op(a, b, accum[i][j]); } } } } CUTLASS_PRAGMA_UNROLL for (int j = 0; j < kNblock; j++) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kMblock; i++) { int row = row_block + i; int col = col_block + j; MatrixCoord coord = MatrixCoord(row, col); if (row < M && col < N) { complex<ScalarType> acc{ ScalarType(accum[i][j].real()), ScalarType(accum[i][j].imag()) }; ComplexC c_ij = ComplexC(); if (beta.real() != ScalarType() || beta.imag() != ScalarType()) { c_ij = tensor_c.at(coord); } complex<ScalarType> src{ ScalarType(c_ij.real()), ScalarType(c_ij.imag()) }; complex<ScalarType> result = 
alpha * acc + beta * src; ComplexC d_ij; d_ij.real() = convert_op(result.real()); d_ij.imag() = convert_op(result.imag()); tensor_d.at(coord) = d_ij; } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// Explicitly naming types needed by this template can be cumbersome, particularly for the /// accumulator type, so a function argument 'initial_accum' is exposed. Passing /// AccumulatorType(0) as the last function argument can be easier than naming all template /// arguments explicitly. template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ScalarType, typename ComputeType, typename ConvertOp = NumericConverter<ElementC, ScalarType>, typename InnerProductOp = multiply_add<complex<ComputeType>> > void GemmPlanarComplex( gemm::GemmCoord problem_size, complex<ScalarType> alpha, TensorRefPlanarComplex<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, TensorRefPlanarComplex<ElementB, LayoutB> tensor_b, ComplexTransform transform_b, complex<ScalarType> beta, TensorRefPlanarComplex<ElementC, LayoutC> tensor_c, TensorRefPlanarComplex<ElementC, LayoutC> tensor_d, complex<ComputeType> initial_accum) { static_assert( LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, "Tensors must be of rank 2"); int const kMblock = kernel::kGemmPlanarComplexBlockSize; int const kNblock = kernel::kGemmPlanarComplexBlockSize; dim3 block(16, 8); dim3 grid( (problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock), (problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock), 1); kernel::GemmPlanarComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType, ComputeType, ConvertOp, InnerProductOp ><<< grid, block >>>( problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, initial_accum ); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// This assumes the accumulator type is the same type as the scalars. template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ScalarType > void GemmPlanarComplex( gemm::GemmCoord problem_size, complex<ScalarType> alpha, TensorRefPlanarComplex<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, TensorRefPlanarComplex<ElementB, LayoutB> tensor_b, ComplexTransform transform_b, complex<ScalarType> beta, TensorRefPlanarComplex<ElementC, LayoutC> tensor_c, TensorRefPlanarComplex<ElementC, LayoutC> tensor_d) { GemmPlanarComplex( problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, complex<ScalarType>()); } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace reference } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h", "repo_id": "cutlass", "token_count": 3432 }
70
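The launcher above fixes the thread block at dim3(16, 8) and lets each thread accumulate a kGemmPlanarComplexBlockSize x kGemmPlanarComplexBlockSize (4x4) tile, so one block covers a 64 x 32 region of the output. Below is a small host-only sketch of that launch-shape arithmetic; the problem size is an arbitrary value chosen for illustration.

#include <cstdio>

int main() {
  int M = 1000, N = 520;               // illustrative GEMM extents
  int const kMblock = 4, kNblock = 4;  // per-thread tile (kGemmPlanarComplexBlockSize)
  int const block_x = 16, block_y = 8; // threads per block, as in the launcher

  // Same ceiling division the launcher performs when building dim3 grid(...).
  int grid_x = (M + block_x * kMblock - 1) / (block_x * kMblock);  // ceil(M / 64)
  int grid_y = (N + block_y * kNblock - 1) / (block_y * kNblock);  // ceil(N / 32)

  std::printf("grid = (%d, %d, 1), block = (%d, %d, 1)\n",
              grid_x, grid_y, block_x, block_y);
  return 0;
}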
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for complex-valued GEMM in host-side code. */ #pragma once #include "cutlass/coord.h" #include "cutlass/complex.h" #include "cutlass/numeric_types.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/matrix_coord.h" #include "cutlass/tensor_view.h" #include "cutlass/gemm/gemm.h" namespace cutlass { namespace reference { namespace host { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// Explicitly naming types needed by this template can be cumbersome, particularly for the /// accumulator type, so a function argument 'initial_accum' is exposed. Passing /// AccumulatorType(0) as the last function argument can be easier than naming all template /// arguments explicitly. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ScalarType, typename ComputeType, typename ElementD = ElementC, typename ConvertOp = NumericConverter<ElementD, ScalarType>, typename InnerProductOp = multiply_add<ComputeType> > void GemmComplex( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, TensorRef<ElementB, LayoutB> tensor_b, ComplexTransform transform_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementD, LayoutC> tensor_d, ComputeType initial_accum, int batch_count = 1, int64_t batch_stride_A = 0, int64_t batch_stride_B = 0, int64_t batch_stride_C = 0, int64_t batch_stride_D = 0) { static_assert( LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, "Tensors must be of rank 2"); // Note: batch is ignored. int const M = problem_size.m(); int const N = problem_size.n(); int const K = problem_size.k(); // Blocking necessary to speedup reference implementation int const Mblock = 16; int const Nblock = 16; ConvertOp convert_op; InnerProductOp inner_product_op; for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) { // Compute matrix product using blocks for (int row_block = 0; row_block < M; row_block += Mblock) { for (int col_block = 0; col_block < N; col_block += Nblock) { ComputeType accum[Mblock][Nblock]; for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Mblock; i++) { accum[i][j] = initial_accum; } } for (int k_block = 0; k_block < K; ++k_block) { for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Mblock; i++) { int row = row_block + i; int col = col_block + j; if (row < M && col < N) { ElementA a = tensor_a.at(MatrixCoord(row, k_block)); ElementB b = tensor_b.at(MatrixCoord(k_block, col)); ComputeType a_ik = ComputeType(a); ComputeType b_kj = ComputeType(b); if (transform_a == ComplexTransform::kConjugate) { a_ik = conj(a_ik); } if (transform_b == ComplexTransform::kConjugate) { b_kj = conj(b_kj); } accum[i][j] = inner_product_op(a_ik, b_kj, accum[i][j]); } } } } for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Mblock; i++) { int row = row_block + i; int col = col_block + j; MatrixCoord coord = MatrixCoord(row, col); if (row < M && col < N) { tensor_d.at(coord) = convert_op( alpha * ScalarType(accum[i][j]) + beta * ScalarType(tensor_c.at(coord))); } } } } // for (col_block) } // for (row_block) tensor_a.add_pointer_offset(batch_stride_A); tensor_b.add_pointer_offset(batch_stride_B); tensor_c.add_pointer_offset(batch_stride_C); tensor_d.add_pointer_offset(batch_stride_D); } // for (batch_idx) } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// This assumes the accumulator type is the same type as the scalars. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ScalarType, typename ElementD = ElementC > void GemmComplex( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, TensorRef<ElementB, LayoutB> tensor_b, ComplexTransform transform_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementD, LayoutC> tensor_d) { GemmComplex(problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, ScalarType(0)); } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace host } // namespace reference } // namespace cutlass
cutlass/tools/util/include/cutlass/util/reference/host/gemm_complex.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/host/gemm_complex.h", "repo_id": "cutlass", "token_count": 2608 }
71
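A hypothetical host-only usage sketch of the GemmComplex reference above. It assumes the usual cutlass/util/host_tensor.h helper for allocation (constructed here without device backing) and fills the operands with an arbitrary deterministic pattern; it is meant to show the call shape, not how the CUTLASS tests seed or verify tensors.

#include <iostream>

#include "cutlass/complex.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"

int main() {
  using Element = cutlass::complex<float>;
  using Layout  = cutlass::layout::ColumnMajor;

  int M = 8, N = 8, K = 4;
  cutlass::gemm::GemmCoord problem(M, N, K);

  // Host-only allocations; the host reference never touches device memory.
  cutlass::HostTensor<Element, Layout> A({M, K}, /*device_backed=*/false);
  cutlass::HostTensor<Element, Layout> B({K, N}, /*device_backed=*/false);
  cutlass::HostTensor<Element, Layout> C({M, N}, /*device_backed=*/false);
  cutlass::HostTensor<Element, Layout> D({M, N}, /*device_backed=*/false);

  // Arbitrary deterministic fill; C is left at its zero-initialized state.
  for (int i = 0; i < M; ++i)
    for (int k = 0; k < K; ++k)
      A.host_view().at({i, k}) = Element(float(i), float(k));
  for (int k = 0; k < K; ++k)
    for (int j = 0; j < N; ++j)
      B.host_view().at({k, j}) = Element(float(k), float(-j));

  Element alpha(1.0f, 0.0f);
  Element beta(0.0f, 0.0f);

  // D = alpha * conj(A) * B + beta * C, via the ScalarType(0)-accumulator overload.
  cutlass::reference::host::GemmComplex(
      problem,
      alpha,
      A.host_ref(), cutlass::ComplexTransform::kConjugate,
      B.host_ref(), cutlass::ComplexTransform::kNone,
      beta,
      C.host_ref(),
      D.host_ref());

  Element d00 = D.host_view().at({0, 0});
  std::cout << "D(0,0) = " << d00.real() << " + " << d00.imag() << "i" << std::endl;
  return 0;
}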
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cmath> #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/tensor_ref.h" #include "cutlass/util/reference/detail/linear_to_coordinate.h" #include "cutlass/core_io.h" namespace cutlass { namespace reference { namespace host { /////////////////////////////////////////////////////////////////////////////////////////////////// /// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side /// workspace template < typename Element, typename Layout, typename ComputeType, typename ReduceOp, typename TransformOp > ComputeType TensorTransformReduce( TensorView<Element, Layout> view, ComputeType identity, ReduceOp reduce, TransformOp transform ) { for (int64_t idx = 0; idx < int64_t(view.size()); ++idx) { typename Layout::TensorCoord coord; cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view.extent()); if (view.contains(coord)) { Element x = view.at(coord); identity = reduce(identity, transform(x)); } } return identity; } /// Transform-reduce operation over the elements of a tensor. 
This helper allocates the device-side /// workspace template < typename Element, typename Layout, typename ComputeType, typename ReduceOp, typename TransformOp > ComputeType TensorTransformReduce( TensorView<Element, Layout> view_A, TensorView<Element, Layout> view_B, ComputeType identity, ReduceOp reduce, TransformOp transform) { if (view_A.extent() != view_B.extent()) { throw std::runtime_error("Tensor extents must match."); } for (int64_t idx = 0; idx < int64_t(view_A.size()); ++idx) { typename Layout::TensorCoord coord; cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view_A.extent()); if (view_A.contains(coord)) { Element a = view_A.at(coord); Element b = view_B.at(coord); identity = reduce(identity, transform(a, b)); } } return identity; } /// Helper to compute the sum of the elements of a tensor template < typename Element, typename Layout, typename ComputeType = Element > ComputeType TensorSum( TensorView<Element, Layout> view, ComputeType identity = ComputeType() ) { plus<ComputeType> reduce; NumericConverter<ComputeType, Element> transform; return TensorTransformReduce( view, identity, reduce, transform); } /// Helper to compute the sum of the squares of the elements of a tensor template < typename Element, typename Layout, typename ComputeType = Element > ComputeType TensorSumSq( TensorView<Element, Layout> view, ComputeType identity = ComputeType() ) { plus<ComputeType> reduce; magnitude_squared<Element, ComputeType> transform; return TensorTransformReduce( view, identity, reduce, transform); } /// Helper to compute the norm of the elements of a tensor. template < typename Element, typename Layout, typename ComputeType = double > ComputeType TensorNorm( TensorView<Element, Layout> view, ComputeType identity = ComputeType() ) { return std::sqrt(TensorSumSq(view, identity)); } /// Helper to compute the sum of the squares of the differences of two tensors template < typename Element, typename Layout, typename ComputeType = double > ComputeType TensorSumSqDiff( TensorView<Element, Layout> view_A, TensorView<Element, Layout> view_B, ComputeType identity = ComputeType() ) { plus<ComputeType> reduce; magnitude_squared_difference<Element, ComputeType> transform; return TensorTransformReduce( view_A, view_B, identity, reduce, transform); } /// Helper to compute the norm of the tensor computed as the difference of two tensors in memory template < typename Element, typename Layout, typename ComputeType = double > ComputeType TensorNormDiff( TensorView<Element, Layout> view_A, TensorView<Element, Layout> view_B, ComputeType identity = ComputeType() ) { return std::sqrt(TensorSumSqDiff(view_A, view_B, identity)); } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace host } // namespace reference } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.h", "repo_id": "cutlass", "token_count": 1810 }
72
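An illustrative sketch of the reductions above driving the usual relative-error check, norm(Computed - Reference) / norm(Reference), on host data. HostTensor (built here without device backing), the row-major layout, and the perturbation value are assumptions made only for the example.

#include <iostream>

#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_reduce.h"

int main() {
  using Layout = cutlass::layout::RowMajor;
  int M = 4, N = 4;

  cutlass::HostTensor<float, Layout> computed({M, N}, /*device_backed=*/false);
  cutlass::HostTensor<float, Layout> reference({M, N}, /*device_backed=*/false);

  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      reference.host_view().at({i, j}) = float(i * N + j + 1);
      computed.host_view().at({i, j})  = float(i * N + j + 1) + 1e-4f;  // small perturbation
    }
  }

  // Both reductions default ComputeType to double.
  double ref_norm  = cutlass::reference::host::TensorNorm(reference.host_view());
  double diff_norm = cutlass::reference::host::TensorNormDiff(computed.host_view(),
                                                              reference.host_view());

  std::cout << "relative error: " << diff_norm / ref_norm << std::endl;
  return 0;
}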
# Doxyfile 1.8.5 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. # # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all text # before the first occurrence of this tag. Doxygen uses libiconv (or the iconv # built into libc) for the transcoding. See http://www.gnu.org/software/libiconv # for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = "CUTLASS" # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = "CUDA Templates for Linear Algebra Subroutines and Solvers" # With the PROJECT_LOGO tag one can specify an logo or icon that is included in # the documentation. The maximum height of the logo should not exceed 55 pixels # and the maximum width should not exceed 200 pixels. Doxygen will copy the logo # to the output directory. PROJECT_LOGO = media/images/cutlass-logo-small.png # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = doxygen # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# Possible values are: Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese- # Traditional, Croatian, Czech, Danish, Dutch, English, Esperanto, Farsi, # Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en, # Korean, Korean-en, Latvian, Norwegian, Macedonian, Persian, Polish, # Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, # Turkish, Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = NO # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = NO # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. If left blank only the name of # the header file containing the class definition is used. Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. 
STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a # new page for each member. If set to NO, the documentation of a member will be # part of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 2 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines. #ALIASES += "concept{1}=@ingroup \1\n@par Implemented concepts:\n@ref \1" ALIASES += "concept{1}=@ingroup \1" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. Doxygen will then generate output that is more tailored # for that language. 
For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, # C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. # # Note For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. EXTENSION_MAPPING = cu=C++ # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by by putting a % sign in front of the word # or globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = YES # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen to replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. 
If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. 
Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. # Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined # locally in source files will be included in the documentation. If set to NO # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO these classes will be included in the various overviews. This option has # no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # (class|struct|union) declarations. If set to NO these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. If set to YES upper-case letters are also # allowed. 
This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. # The default value is: system dependent. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO the members will appear in declaration order. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the # todo list. This list is created by putting \todo commands in the # documentation. 
# The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the # test list. This list is created by putting \test commands in the # documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if <section_label> ... \endif and \cond <section_label> # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES the list # will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. 
This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. Do not use file names with spaces, bibtex cannot handle them. See # also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO doxygen will only warn about wrong or incomplete parameter # documentation, but not about the absence of documentation. # The default value is: NO. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. # Note: If this tag is empty the current directory is searched. INPUT = include/cutlass tools/util/include/cutlass/ tools/library/include/cutlass/ INPUT += media/docs/doxygen_mainpage.md # This tag can be used to specify the character encoding of the source files # that doxygen parses. 
Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: http://www.gnu.org/software/libiconv) for the list of # possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank the # following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, # *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, # *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, # *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, # *.qsf, *.as and *.js. FILE_PATTERNS = # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank all # files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. 
Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER ) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = media/docs/doxygen_mainpage.md #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # function all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. 
REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES, then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see http://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the config file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). # # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. # The default value is: YES. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all # compounds will be generated. Enable this if the project contains a lot of # classes, structs, unions or interfaces. # The default value is: YES. ALPHABETICAL_INDEX = YES # The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in # which the alphabetical index list will be split. # Minimum value: 1, maximum value: 20, default value: 5. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_OUTPUT = # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. # # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. # Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. # Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- # defined cascading style sheet that is included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefor more robust against future updates. # Doxygen will copy the style sheet file to the output directory. For an example # see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. 
Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the stylesheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # http://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_HUE = 100 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 50 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to NO can help when comparing the output of multiple runs. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). So setting the number of # entries 1 will produce a full collapsed tree by default. 0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: http://developer.apple.com/tools/xcode/), introduced with # OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. 
See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on # Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler ( hhc.exe). If non-empty # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated ( # YES) or that it should be included in the master .chm file ( NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated ( # YES) or a normal table of contents ( NO) in the .chm file. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. # This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace # (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- # folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location of Qt's # qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the # generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. 
The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has # the same information as the tab index, you could consider setting # DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. # This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML # output directory to force them to be regenerated. 
# Minimum value: 8, maximum value: 50, default value: 10. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # # Note that when changing this option you need to delete any form_*.png files in # the HTML output directory before the changes have effect. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # http://www.mathjax.org) which uses client side Javascript for the rendering # instead of using prerendered bitmaps. Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. USE_MATHJAX = YES # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: # http://docs.mathjax.org/en/latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the HTML # output directory using the MATHJAX_RELPATH option. The destination directory # should contain the MathJax.js script. For instance, if the mathjax directory # is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from http://www.mathjax.org before deployment. # The default value is: http://cdn.mathjax.org/mathjax/latest. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site # (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box for # the HTML output. The underlying search engine uses javascript and DHTML and # should work on any modern browser. Note that when using HTML help # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) # there is already a search function so this one should typically be disabled. # For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. 
It is possible to # search using the keyboard; to jump to the search box use <access key> + S # (what the <access key> is depends on the OS and browser, but it is typically # <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down # key> to jump into the search results window, the results can be navigated # using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel # the search. The filter options can be selected when the cursor is inside the # search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys> # to select a filter and <Enter> or <escape> to activate or cancel the filter # option. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using Javascript. There # are two flavours of web server based searching depending on the # EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for # searching and an index file used by the script. When EXTERNAL_SEARCH is # enabled the indexing and searching needs to be provided by external tools. See # the section "External Indexing and Searching" for details. # The default value is: NO. # This tag requires that the tag SEARCHENGINE is set to YES. SERVER_BASED_SEARCH = NO # When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP # script for searching. Instead the search results are written to an XML file # which needs to be processed by an external indexer. Doxygen will invoke an # external search engine pointed to by the SEARCHENGINE_URL option to obtain the # search results. # # Doxygen ships with an example indexer ( doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: http://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. # This tag requires that the tag SEARCHENGINE is set to YES. EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will return the search results when EXTERNAL_SEARCH is enabled. # # Doxygen ships with an example indexer ( doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: http://xapian.org/). See the section "External Indexing and # Searching" for details. # This tag requires that the tag SEARCHENGINE is set to YES. SEARCHENGINE_URL = # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed # search data is written to a file for indexing by an external tool. With the # SEARCHDATA_FILE tag the name of this file can be specified. # The default file is: searchdata.xml. # This tag requires that the tag SEARCHENGINE is set to YES. SEARCHDATA_FILE = searchdata.xml # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the # EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is # useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple # projects and redirect the results back to the right project. # This tag requires that the tag SEARCHENGINE is set to YES. EXTERNAL_SEARCH_ID = # The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen # projects other than the one defined by this configuration file, but that are # all added to the same external search index. 
Each project needs to have a # unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of # to a relative location where the documentation can be found. The format is: # EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ... # This tag requires that the tag SEARCHENGINE is set to YES. EXTRA_SEARCH_MAPPINGS = #--------------------------------------------------------------------------- # Configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output. # The default value is: YES. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: latex. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. # # Note that when enabling USE_PDFLATEX this option is only used for generating # bitmaps for formulas in the HTML output, but not in the Makefile that is # written to the output directory. # The default file is: latex. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate # index for LaTeX. # The default file is: makeindex. # This tag requires that the tag GENERATE_LATEX is set to YES. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used by the # printer. # Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x # 14 inches) and executive (7.25 x 10.5 inches). # The default value is: a4. # This tag requires that the tag GENERATE_LATEX is set to YES. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names # that should be included in the LaTeX output. To get the times font for # instance you can specify # EXTRA_PACKAGES=times # If left blank no extra packages will be included. # This tag requires that the tag GENERATE_LATEX is set to YES. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for the # generated LaTeX document. The header should contain everything until the first # chapter. If it is left blank doxygen will generate a standard header. See # section "Doxygen usage" for information on how to let doxygen write the # default header to a separate file. # # Note: Only use a user-defined header if you know what you are doing! The # following commands have a special meaning inside the header: $title, # $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will # replace them by respectively the title of the page, the current date and time, # only the current date, the version number of doxygen, the project name (see # PROJECT_NAME), or the project number (see PROJECT_NUMBER). # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the # generated LaTeX document. 
The footer should contain everything after the last # chapter. If it is left blank doxygen will generate a standard footer. # # Note: Only use a user-defined footer if you know what you are doing! # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_FOOTER = # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the LATEX_OUTPUT output # directory. Note that the files will be copied as-is; there are no commands or # markers available. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_EXTRA_FILES = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is # prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will # contain links (just like the HTML output) instead of page references. This # makes the output suitable for online browsing using a PDF viewer. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. PDF_HYPERLINKS = YES # If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate # the PDF file directly from the LaTeX files. Set this option to YES to get a # higher quality PDF documentation. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode # command to the generated LaTeX files. This will instruct LaTeX to keep running # if errors occur, instead of asking the user for help. This option is also used # when generating formulas in HTML. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_BATCHMODE = NO # If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the # index chapters (such as File Index, Compound Index, etc.) in the output. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HIDE_INDICES = NO # If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source # code with syntax highlighting in the LaTeX output. # # Note that which sources are shown also depends on other settings such as # SOURCE_BROWSER. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See # http://en.wikipedia.org/wiki/BibTeX and \cite for more info. # The default value is: plain. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The # RTF output is optimized for Word 97 and may not look too pretty with other RTF # readers/editors. # The default value is: NO. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: rtf. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. 
# This tag requires that the tag GENERATE_RTF is set to YES. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will # contain hyperlink fields. The RTF file will contain links (just like the HTML # output) instead of page references. This makes the output suitable for online # browsing using Word or some other Word compatible readers that support those # fields. # # Note: WordPad (write) and others do not support links. # The default value is: NO. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's config # file, i.e. a series of assignments. You only have to provide replacements, # missing definitions are set to their default value. # # See also section "Doxygen usage" for information on how to generate the # default style sheet that doxygen normally uses. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an RTF document. Syntax is # similar to doxygen's config file. A template extensions file can be generated # using doxygen -e rtf extensionFile. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES doxygen will generate man pages for # classes and files. # The default value is: NO. GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. A directory man3 will be created inside the directory specified by # MAN_OUTPUT. # The default directory is: man. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to the generated # man pages. In case the manual section does not start with a number, the number # 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is # optional. # The default value is: .3. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and doxygen generates man output, then it # will generate one additional man file for each entity documented in the real # man page(s). These additional files only source the real man page, but without # them the man command would be unable to find the correct page. # The default value is: NO. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_LINKS = NO #--------------------------------------------------------------------------- # Configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES doxygen will generate an XML file that # captures the structure of the code including all documentation. # The default value is: NO. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: xml. # This tag requires that the tag GENERATE_XML is set to YES. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a # validating XML parser to check the syntax of the XML files. 
# This tag requires that the tag GENERATE_XML is set to YES. XML_SCHEMA = # The XML_DTD tag can be used to specify a XML DTD, which can be used by a # validating XML parser to check the syntax of the XML files. # This tag requires that the tag GENERATE_XML is set to YES. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program # listings (including syntax highlighting and cross-referencing information) to # the XML output. Note that enabling this will significantly increase the size # of the XML output. # The default value is: YES. # This tag requires that the tag GENERATE_XML is set to YES. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- # If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files # that can be used to generate PDF. # The default value is: NO. GENERATE_DOCBOOK = NO # The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be put in # front of it. # The default directory is: docbook. # This tag requires that the tag GENERATE_DOCBOOK is set to YES. DOCBOOK_OUTPUT = docbook #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen # Definitions (see http://autogen.sf.net) file that captures the structure of # the code including all documentation. Note that this feature is still # experimental and incomplete at the moment. # The default value is: NO. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # Configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module # file that captures the structure of the code including all documentation. # # Note that this feature is still experimental and incomplete at the moment. # The default value is: NO. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary # Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI # output from the Perl module output. # The default value is: NO. # This tag requires that the tag GENERATE_PERLMOD is set to YES. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely # formatted so it can be parsed by a human reader. This is useful if you want to # understand what is going on. On the other hand, if this tag is set to NO the # size of the Perl module output will be much smaller and Perl will parse it # just the same. # The default value is: YES. # This tag requires that the tag GENERATE_PERLMOD is set to YES. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file are # prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful # so different doxyrules.make files included by the same Makefile don't # overwrite each other's variables. # This tag requires that the tag GENERATE_PERLMOD is set to YES. 
PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all # C-preprocessor directives found in the sources and include files. # The default value is: YES. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names # in the source code. If set to NO only conditional compilation will be # performed. Macro expansion can be done in a controlled way by setting # EXPAND_ONLY_PREDEF to YES. # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then # the macro expansion is limited to the macros specified with the PREDEFINED and # EXPAND_AS_DEFINED tags. # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES the includes files in the # INCLUDE_PATH will be searched if a #include is found. # The default value is: YES. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by the # preprocessor. # This tag requires that the tag SEARCH_INCLUDES is set to YES. INCLUDE_PATH = . # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will be # used. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that are # defined before the preprocessor is started (similar to the -D option of e.g. # gcc). The argument of the tag is a list of macros of the form: name or # name=definition (no spaces). If the definition and the "=" are omitted, "=1" # is assumed. To prevent a macro definition from being undefined via #undef or # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The # macro definition that is found in the sources will be used. Use the PREDEFINED # tag if you want to use a different macro definition that overrules the # definition found in the source code. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will # remove all refrences to function-like macros that are alone on a line, have an # all uppercase name, and do not end with a semicolon. Such function macros are # typically used for boiler-plate code, and will confuse the parser if not # removed. # The default value is: YES. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. 
SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration options related to external references #--------------------------------------------------------------------------- # The TAGFILES tag can be used to specify one or more tag files. For each tag # file the location of the external documentation should be added. The format of # a tag file without this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where loc1 and loc2 can be relative or absolute paths or URLs. See the # section "Linking to external documentation" for more information about the use # of tag files. # Note: Each tag file must have an unique name (where the name does NOT include # the path). If a tag file is not located in the directory in which doxygen is # run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create a # tag file that is based on the input files it reads. See section "Linking to # external documentation" for more information about the usage of tag files. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external class will be listed in the # class index. If set to NO only the inherited external classes will be listed. # The default value is: NO. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in # the modules index. If set to NO, only the current project's groups will be # listed. # The default value is: YES. EXTERNAL_GROUPS = YES # If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in # the related pages index. If set to NO, only the current project's pages will # be listed. # The default value is: YES. EXTERNAL_PAGES = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of 'which perl'). # The default file (with absolute path) is: /usr/bin/perl. PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram # (in HTML and LaTeX) for classes with base or super classes. Setting the tag to # NO turns the diagrams off. Note that this option also works with HAVE_DOT # disabled, but it is recommended to install and use dot, since it yields more # powerful graphs. # The default value is: YES. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see: # http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide inheritance # and usage relations if the target is undocumented or is not a class. # The default value is: YES. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz (see: # http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent # Bell Labs. 
The other options in this section have no effect if this option is # set to NO # The default value is: NO. HAVE_DOT = $(HAVE_DOT) # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed # to run in parallel. When set to 0 doxygen will base this on the number of # processors available in the system. You can set it explicitly to a value # larger than 0 to get control over the balance between CPU load and processing # speed. # Minimum value: 0, maximum value: 32, default value: 0. # This tag requires that the tag HAVE_DOT is set to YES. DOT_NUM_THREADS = 0 # When you want a differently looking font n the dot files that doxygen # generates you can specify the font name using DOT_FONTNAME. You need to make # sure dot is able to find the font, which can be done by putting it in a # standard location or by setting the DOTFONTPATH environment variable or by # setting DOT_FONTPATH to the directory containing the font. # The default value is: Helvetica. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size (in points) of the font of # dot graphs. # Minimum value: 4, maximum value: 24, default value: 10. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the default font as specified with # DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set # the path where dot can find it using this tag. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTPATH = # If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for # each documented class showing the direct and indirect inheritance relations. # Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a # graph for each documented class showing the direct and indirect implementation # dependencies (inheritance, containment, and class references variables) of the # class with other documented classes. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for # groups, showing the direct groups dependencies. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside the # class node. If there are many fields or methods and many nodes the graph may # become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the # number of items for each type to make the size more manageable. Set this to 0 # for no limit. Note that the threshold may be exceeded by 50% before the limit # is enforced. So when you set the threshold to 10, up to 15 fields may appear, # but if the number exceeds 15, the total amount of fields shown is limited to # 10. # Minimum value: 0, maximum value: 100, default value: 10. # This tag requires that the tag HAVE_DOT is set to YES. 
UML_LIMIT_NUM_FIELDS = 10 # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and # collaboration graphs will show the relations between templates and their # instances. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. TEMPLATE_RELATIONS = NO # If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to # YES then doxygen will generate a graph for each documented file showing the # direct and indirect include dependencies of the file with other documented # files. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. INCLUDE_GRAPH = YES # If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are # set to YES then doxygen will generate a graph for each documented file showing # the direct and indirect include dependencies of the file with other documented # files. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH tag is set to YES then doxygen will generate a call # dependency graph for every global function or class method. # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. CALL_GRAPH = NO # If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller # dependency graph for every global function or class method. # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable caller graphs for selected # functions only using the \callergraph command. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical # hierarchy of all classes instead of a textual one. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the # dependencies a directory has on other directories in a graphical way. The # dependency relations are determined by the #include relations between the # files in the directories. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order # to make the SVG files visible in IE 9+ (other browsers do not have this # requirement). # Possible values are: png, jpg, gif and svg. # The default value is: png. # This tag requires that the tag HAVE_DOT is set to YES. DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # # Note that this requires a modern browser other than Internet Explorer. Tested # and working are Firefox, Chrome, Safari, and Opera. # Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make # the SVG files visible. Older versions of IE do not have SVG support. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. INTERACTIVE_SVG = NO # The DOT_PATH tag can be used to specify the path where the dot tool can be # found. 
If left blank, it is assumed the dot tool can be found in the path. # This tag requires that the tag HAVE_DOT is set to YES. DOT_PATH = $(DOT_PATH) # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the \dotfile # command). # This tag requires that the tag HAVE_DOT is set to YES. DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the \mscfile # command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes # that will be shown in the graph. If the number of nodes in a graph becomes # larger than this value, doxygen will truncate the graph, which is visualized # by representing a node as a red box. Note that doxygen if the number of direct # children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that # the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. # Minimum value: 0, maximum value: 10000, default value: 50. # This tag requires that the tag HAVE_DOT is set to YES. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs # generated by dot. A depth value of 3 means that only nodes reachable from the # root by following a path via at most 3 edges will be shown. Nodes that lay # further from the root node will be omitted. Note that setting this option to 1 # or 2 may greatly reduce the computation time needed for large code bases. Also # note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. # Minimum value: 0, maximum value: 1000, default value: 0. # This tag requires that the tag HAVE_DOT is set to YES. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not seem # to support this out of the box. # # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) support # this, this feature is disabled by default. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page # explaining the meaning of the various boxes and arrows in the dot generated # graphs. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot # files that are used to generate the various graphs. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. DOT_CLEANUP = YES
cutlass/Doxyfile/0
{ "file_path": "cutlass/Doxyfile", "repo_id": "cutlass", "token_count": 28977 }
0
/* The standard CSS for doxygen 1.8.11 */ body, table, div, p, dl { font: 400 14px/22px Roboto,sans-serif; } /* @group Heading Levels */ h1.groupheader { font-size: 150%; } .title { font: 400 14px/28px Roboto,sans-serif; font-size: 150%; font-weight: bold; margin: 10px 2px; } h2.groupheader { border-bottom: 1px solid #A3BA98; color: #526947; font-size: 150%; font-weight: normal; margin-top: 1.75em; padding-top: 8px; padding-bottom: 4px; width: 100%; } h3.groupheader { font-size: 100%; } h1, h2, h3, h4, h5, h6 { -webkit-transition: text-shadow 0.5s linear; -moz-transition: text-shadow 0.5s linear; -ms-transition: text-shadow 0.5s linear; -o-transition: text-shadow 0.5s linear; transition: text-shadow 0.5s linear; margin-right: 15px; } h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { text-shadow: 0 0 15px cyan; } dt { font-weight: bold; } div.multicol { -moz-column-gap: 1em; -webkit-column-gap: 1em; -moz-column-count: 3; -webkit-column-count: 3; } p.startli, p.startdd { margin-top: 2px; } p.starttd { margin-top: 0px; } p.endli { margin-bottom: 0px; } p.enddd { margin-bottom: 4px; } p.endtd { margin-bottom: 2px; } /* @end */ caption { font-weight: bold; } span.legend { font-size: 70%; text-align: center; } h3.version { font-size: 90%; text-align: center; } div.qindex, div.navtab{ background-color: #F0F3EE; border: 1px solid #B9CAB0; text-align: center; } div.qindex, div.navpath { width: 100%; line-height: 140%; } div.navtab { margin-right: 15px; } /* @group Link Styling */ a { color: #5E7851; font-weight: normal; text-decoration: none; } .contents a:visited { color: #6D8B5D; } a:hover { text-decoration: underline; } a.qindex { font-weight: bold; } a.qindexHL { font-weight: bold; background-color: #B3C6AA; color: #ffffff; border: 1px double #A2B997; } .contents a.qindexHL:visited { color: #ffffff; } a.el { font-weight: bold; } a.elRef { } a.code, a.code:visited, a.line, a.line:visited { color: #4665A2; } a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited { color: #4665A2; } /* @end */ dl.el { margin-left: -1cm; } pre.fragment { border: 1px solid #C4CFE5; background-color: #FBFCFD; padding: 4px 6px; margin: 4px 8px 4px 2px; overflow: auto; word-wrap: break-word; font-size: 9pt; line-height: 125%; font-family: monospace, fixed; font-size: 105%; } div.fragment { padding: 4px 6px; margin: 4px 8px 4px 2px; background-color: #FCFDFC; border: 1px solid #D2DDCD; } div.line { font-family: monospace, fixed; font-size: 13px; min-height: 13px; line-height: 1.0; text-wrap: unrestricted; white-space: -moz-pre-wrap; /* Moz */ white-space: -pre-wrap; /* Opera 4-6 */ white-space: -o-pre-wrap; /* Opera 7 */ white-space: pre-wrap; /* CSS3 */ word-wrap: break-word; /* IE 5.5+ */ text-indent: -53px; padding-left: 53px; padding-bottom: 0px; margin: 0px; -webkit-transition-property: background-color, box-shadow; -webkit-transition-duration: 0.5s; -moz-transition-property: background-color, box-shadow; -moz-transition-duration: 0.5s; -ms-transition-property: background-color, box-shadow; -ms-transition-duration: 0.5s; -o-transition-property: background-color, box-shadow; -o-transition-duration: 0.5s; transition-property: background-color, box-shadow; transition-duration: 0.5s; } div.line:after { content:"\000A"; white-space: pre; } div.line.glow { background-color: cyan; box-shadow: 0 0 10px cyan; } span.lineno { padding-right: 4px; text-align: right; border-right: 2px solid #0F0; background-color: #E8E8E8; white-space: pre; } span.lineno a { background-color: #D8D8D8; } span.lineno a:hover { 
background-color: #C8C8C8; } div.ah, span.ah { background-color: black; font-weight: bold; color: #ffffff; margin-bottom: 3px; margin-top: 3px; padding: 0.2em; border: solid thin #333; border-radius: 0.5em; -webkit-border-radius: .5em; -moz-border-radius: .5em; box-shadow: 2px 2px 3px #999; -webkit-box-shadow: 2px 2px 3px #999; -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000 110%); } div.classindex ul { list-style: none; padding-left: 0; } div.classindex span.ai { display: inline-block; } div.groupHeader { margin-left: 16px; margin-top: 12px; font-weight: bold; } div.groupText { margin-left: 16px; font-style: italic; } body { background-color: white; color: black; margin: 0; } div.contents { margin-top: 10px; margin-left: 12px; margin-right: 8px; } td.indexkey { background-color: #F0F3EE; font-weight: bold; border: 1px solid #D2DDCD; margin: 2px 0px 2px 0; padding: 2px 10px; white-space: nowrap; vertical-align: top; } td.indexvalue { background-color: #F0F3EE; border: 1px solid #D2DDCD; padding: 2px 10px; margin: 2px 0px; } tr.memlist { background-color: #F2F5F0; } p.formulaDsp { text-align: center; } img.formulaDsp { } img.formulaInl { vertical-align: middle; } div.center { text-align: center; margin-top: 0px; margin-bottom: 0px; padding: 0px; } div.center img { border: 0px; } address.footer { text-align: right; padding-right: 12px; } img.footer { border: 0px; vertical-align: middle; } /* @group Code Colorization */ span.keyword { color: #008000 } span.keywordtype { color: #604020 } span.keywordflow { color: #e08000 } span.comment { color: #800000 } span.preprocessor { color: #806020 } span.stringliteral { color: #002080 } span.charliteral { color: #008080 } span.vhdldigit { color: #ff00ff } span.vhdlchar { color: #000000 } span.vhdlkeyword { color: #700070 } span.vhdllogic { color: #ff0000 } blockquote { background-color: #F9FAF8; border-left: 2px solid #B3C6AA; margin: 0 24px 0 4px; padding: 0 12px 0 16px; } /* @end */ /* .search { color: #003399; font-weight: bold; } form.search { margin-bottom: 0px; margin-top: 0px; } input.search { font-size: 75%; color: #000080; font-weight: normal; background-color: #e8eef2; } */ td.tiny { font-size: 75%; } .dirtab { padding: 4px; border-collapse: collapse; border: 1px solid #B9CAB0; } th.dirtab { background: #F0F3EE; font-weight: bold; } hr { height: 0px; border: none; border-top: 1px solid #729262; } hr.footer { height: 1px; } /* @group Member Descriptions */ table.memberdecls { border-spacing: 0px; padding: 0px; } .memberdecls td, .fieldtable tr { -webkit-transition-property: background-color, box-shadow; -webkit-transition-duration: 0.5s; -moz-transition-property: background-color, box-shadow; -moz-transition-duration: 0.5s; -ms-transition-property: background-color, box-shadow; -ms-transition-duration: 0.5s; -o-transition-property: background-color, box-shadow; -o-transition-duration: 0.5s; transition-property: background-color, box-shadow; transition-duration: 0.5s; } .memberdecls td.glow, .fieldtable tr.glow { background-color: cyan; box-shadow: 0 0 15px cyan; } .mdescLeft, .mdescRight, .memItemLeft, .memItemRight, .memTemplItemLeft, .memTemplItemRight, .memTemplParams { background-color: #FAFBFA; border: none; margin: 4px; padding: 1px 0 0 8px; } .mdescLeft, .mdescRight { padding: 0px 8px 4px 8px; color: #555; } .memSeparator { border-bottom: 1px solid 
#DEE4F0; line-height: 1px; margin: 0px; padding: 0px; } .memItemLeft, .memTemplItemLeft { white-space: nowrap; } .memItemRight { width: 100%; } .memTemplParams { color: #6D8B5D; white-space: nowrap; font-size: 80%; } /* @end */ /* @group Member Details */ /* Styles for detailed member documentation */ .memtemplate { font-size: 80%; color: #6D8B5D; font-weight: normal; margin-left: 9px; } .memnav { background-color: #F0F3EE; border: 1px solid #B9CAB0; text-align: center; margin: 2px; margin-right: 15px; padding: 2px; } .mempage { width: 100%; } .memitem { padding: 0; margin-bottom: 10px; margin-right: 5px; -webkit-transition: box-shadow 0.5s linear; -moz-transition: box-shadow 0.5s linear; -ms-transition: box-shadow 0.5s linear; -o-transition: box-shadow 0.5s linear; transition: box-shadow 0.5s linear; display: table !important; width: 100%; } .memitem.glow { box-shadow: 0 0 15px cyan; } .memname { font-weight: bold; margin-left: 6px; } .memname td { vertical-align: bottom; } .memproto, dl.reflist dt { border-top: 1px solid #BCCDB4; border-left: 1px solid #BCCDB4; border-right: 1px solid #BCCDB4; padding: 6px 0px 6px 0px; color: #394931; font-weight: bold; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); background-image:url('nav_f.png'); background-repeat:repeat-x; background-color: #E9EEE6; /* opera specific markup */ box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); border-top-right-radius: 4px; border-top-left-radius: 4px; /* firefox specific markup */ -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; -moz-border-radius-topright: 4px; -moz-border-radius-topleft: 4px; /* webkit specific markup */ -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); -webkit-border-top-right-radius: 4px; -webkit-border-top-left-radius: 4px; } .memdoc, dl.reflist dd { border-bottom: 1px solid #BCCDB4; border-left: 1px solid #BCCDB4; border-right: 1px solid #BCCDB4; padding: 6px 10px 2px 10px; background-color: #FCFDFC; border-top-width: 0; background-image:url('nav_g.png'); background-repeat:repeat-x; background-color: #FFFFFF; /* opera specific markup */ border-bottom-left-radius: 4px; border-bottom-right-radius: 4px; box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); /* firefox specific markup */ -moz-border-radius-bottomleft: 4px; -moz-border-radius-bottomright: 4px; -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; /* webkit specific markup */ -webkit-border-bottom-left-radius: 4px; -webkit-border-bottom-right-radius: 4px; -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); } dl.reflist dt { padding: 5px; } dl.reflist dd { margin: 0px 0px 10px 0px; padding: 5px; } .paramkey { text-align: right; } .paramtype { white-space: nowrap; } .paramname { color: #602020; white-space: nowrap; } .paramname em { font-style: normal; } .paramname code { line-height: 14px; } .params, .retval, .exception, .tparams { margin-left: 0px; padding-left: 0px; } .params .paramname, .retval .paramname { font-weight: bold; vertical-align: top; } .params .paramtype { font-style: italic; vertical-align: top; } .params .paramdir { font-family: "courier new",courier,monospace; vertical-align: top; } table.mlabels { border-spacing: 0px; } td.mlabels-left { width: 100%; padding: 0px; } td.mlabels-right { vertical-align: bottom; padding: 0px; white-space: nowrap; } span.mlabels { margin-left: 8px; } span.mlabel { background-color: #93AD86; border-top:1px solid #7B9C6B; border-left:1px solid #7B9C6B; border-right:1px solid #D2DDCD; border-bottom:1px solid #D2DDCD; text-shadow: none; color: white; margin-right: 4px; padding: 2px 3px; 
border-radius: 3px; font-size: 7pt; white-space: nowrap; vertical-align: middle; } /* @end */ /* these are for tree view inside a (index) page */ div.directory { margin: 10px 0px; border-top: 1px solid #B3C6AA; border-bottom: 1px solid #B3C6AA; width: 100%; } .directory table { border-collapse:collapse; } .directory td { margin: 0px; padding: 0px; vertical-align: top; } .directory td.entry { white-space: nowrap; padding-right: 6px; padding-top: 3px; } .directory td.entry a { outline:none; } .directory td.entry a img { border: none; } .directory td.desc { width: 100%; padding-left: 6px; padding-right: 6px; padding-top: 3px; border-left: 1px solid rgba(0,0,0,0.05); } .directory tr.even { padding-left: 6px; background-color: #F9FAF8; } .directory img { vertical-align: -30%; } .directory .levels { white-space: nowrap; width: 100%; text-align: right; font-size: 9pt; } .directory .levels span { cursor: pointer; padding-left: 2px; padding-right: 2px; color: #5E7851; } .arrow { color: #B3C6AA; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; cursor: pointer; font-size: 80%; display: inline-block; width: 16px; height: 22px; } .icon { font-family: Arial, Helvetica; font-weight: bold; font-size: 12px; height: 14px; width: 16px; display: inline-block; background-color: #93AD86; color: white; text-align: center; border-radius: 4px; margin-left: 2px; margin-right: 2px; } .icona { width: 24px; height: 22px; display: inline-block; } .iconfopen { width: 24px; height: 18px; margin-bottom: 4px; background-image:url('folderopen.png'); background-position: 0px -4px; background-repeat: repeat-y; vertical-align:top; display: inline-block; } .iconfclosed { width: 24px; height: 18px; margin-bottom: 4px; background-image:url('folderclosed.png'); background-position: 0px -4px; background-repeat: repeat-y; vertical-align:top; display: inline-block; } .icondoc { width: 24px; height: 18px; margin-bottom: 4px; background-image:url('doc.png'); background-position: 0px -4px; background-repeat: repeat-y; vertical-align:top; display: inline-block; } table.directory { font: 400 14px Roboto,sans-serif; } /* @end */ div.dynheader { margin-top: 8px; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } address { font-style: normal; color: #415438; } table.doxtable caption { caption-side: top; } table.doxtable { border-collapse:collapse; margin-top: 4px; margin-bottom: 4px; } table.doxtable td, table.doxtable th { border: 1px solid #45593C; padding: 3px 7px 2px; } table.doxtable th { background-color: #556D49; color: #FFFFFF; font-size: 110%; padding-bottom: 4px; padding-top: 5px; } table.fieldtable { /*width: 100%;*/ margin-bottom: 10px; border: 1px solid #BCCDB4; border-spacing: 0px; -moz-border-radius: 4px; -webkit-border-radius: 4px; border-radius: 4px; -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); } .fieldtable td, .fieldtable th { padding: 3px 7px 2px; } .fieldtable td.fieldtype, .fieldtable td.fieldname { white-space: nowrap; border-right: 1px solid #BCCDB4; border-bottom: 1px solid #BCCDB4; vertical-align: top; } .fieldtable td.fieldname { padding-top: 3px; } .fieldtable td.fielddoc { border-bottom: 1px solid #BCCDB4; /*width: 100%;*/ } .fieldtable td.fielddoc p:first-child { margin-top: 0px; } .fieldtable td.fielddoc p:last-child { margin-bottom: 2px; } 
.fieldtable tr:last-child td { border-bottom: none; } .fieldtable th { background-image:url('nav_f.png'); background-repeat:repeat-x; background-color: #E9EEE6; font-size: 90%; color: #394931; padding-bottom: 4px; padding-top: 5px; text-align:left; -moz-border-radius-topleft: 4px; -moz-border-radius-topright: 4px; -webkit-border-top-left-radius: 4px; -webkit-border-top-right-radius: 4px; border-top-left-radius: 4px; border-top-right-radius: 4px; border-bottom: 1px solid #BCCDB4; } .tabsearch { top: 0px; left: 10px; height: 36px; background-image: url('tab_b.png'); z-index: 101; overflow: hidden; font-size: 13px; } .navpath ul { font-size: 11px; background-image:url('tab_b.png'); background-repeat:repeat-x; background-position: 0 -5px; height:30px; line-height:30px; color:#A5BB9A; border:solid 1px #D0DCCB; overflow:hidden; margin:0px; padding:0px; } .navpath li { list-style-type:none; float:left; padding-left:10px; padding-right:15px; background-image:url('bc_s.png'); background-repeat:no-repeat; background-position:right; color:#536B48; } .navpath li.navelem a { height:32px; display:block; text-decoration: none; outline: none; color: #3E5035; font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); text-decoration: none; } .navpath li.navelem a:hover { color:#8BA87D; } .navpath li.footer { list-style-type:none; float:right; padding-left:10px; padding-right:15px; background-image:none; background-repeat:no-repeat; background-position:right; color:#536B48; font-size: 8pt; } div.summary { float: right; font-size: 8pt; padding-right: 5px; width: 50%; text-align: right; } div.summary a { white-space: nowrap; } table.classindex { margin: 10px; white-space: nowrap; margin-left: 3%; margin-right: 3%; width: 94%; border: 0; border-spacing: 0; padding: 0; } div.ingroups { font-size: 8pt; width: 50%; text-align: left; } div.ingroups a { white-space: nowrap; } div.header { background-image:url('nav_h.png'); background-repeat:repeat-x; background-color: #FAFBFA; margin: 0px; border-bottom: 1px solid #D2DDCD; } div.headertitle { padding: 5px 5px 5px 10px; } dl { padding: 0 0 0 10px; } /* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ dl.section { margin-left: 0px; padding-left: 0px; } dl.note { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #D0C000; } dl.warning, dl.attention { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #FF0000; } dl.pre, dl.post, dl.invariant { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #00D000; } dl.deprecated { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #505050; } dl.todo { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #00C0E0; } dl.test { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #3030E0; } dl.bug { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #C08050; } dl.section dd { margin-bottom: 6px; } #projectlogo { text-align: center; vertical-align: bottom; border-collapse: separate; } #projectlogo img { border: 0px none; } #projectalign { vertical-align: middle; } #projectname { font: 300% Tahoma, Arial,sans-serif; margin: 0px; padding: 2px 0px; } #projectbrief { font: 120% Tahoma, Arial,sans-serif; margin: 0px; padding: 0px; } #projectnumber { font: 50% Tahoma, Arial,sans-serif; margin: 0px; padding: 0px; } #titlearea { padding: 0px; margin: 0px; width: 100%; 
border-bottom: 1px solid #7B9C6B; } .image { text-align: center; } .dotgraph { text-align: center; } .mscgraph { text-align: center; } .diagraph { text-align: center; } .caption { font-weight: bold; } div.zoom { border: 1px solid #AABFA0; } dl.citelist { margin-bottom:50px; } dl.citelist dt { color:#4E6443; float:left; font-weight:bold; margin-right:10px; padding:5px; } dl.citelist dd { margin:2px 0; padding:5px 0; } div.toc { padding: 14px 25px; background-color: #F7F9F6; border: 1px solid #E1E8DE; border-radius: 7px 7px 7px 7px; float: right; height: auto; margin: 0 8px 10px 10px; width: 200px; } div.toc li { background: url("bdwn.png") no-repeat scroll 0 5px transparent; font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; margin-top: 5px; padding-left: 10px; padding-top: 2px; } div.toc h3 { font: bold 12px/1.2 Arial,FreeSans,sans-serif; color: #6D8B5D; border-bottom: 0 none; margin: 0; } div.toc ul { list-style: none outside none; border: medium none; padding: 0px; } div.toc li.level1 { margin-left: 0px; } div.toc li.level2 { margin-left: 15px; } div.toc li.level3 { margin-left: 30px; } div.toc li.level4 { margin-left: 45px; } .inherit_header { font-weight: bold; color: gray; cursor: pointer; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } .inherit_header td { padding: 6px 0px 2px 5px; } .inherit { display: none; } tr.heading h2 { margin-top: 12px; margin-bottom: 4px; } /* tooltip related style info */ .ttc { position: absolute; display: none; } #powerTip { cursor: default; white-space: nowrap; background-color: white; border: 1px solid gray; border-radius: 4px 4px 4px 4px; box-shadow: 1px 1px 7px gray; display: none; font-size: smaller; max-width: 80%; opacity: 0.9; padding: 1ex 1em 1em; position: absolute; z-index: 2147483647; } #powerTip div.ttdoc { color: grey; font-style: italic; } #powerTip div.ttname a { font-weight: bold; } #powerTip div.ttname { font-weight: bold; } #powerTip div.ttdeci { color: #006318; } #powerTip div { margin: 0px; padding: 0px; font: 12px/16px Roboto,sans-serif; } #powerTip:before, #powerTip:after { content: ""; position: absolute; margin: 0px; } #powerTip.n:after, #powerTip.n:before, #powerTip.s:after, #powerTip.s:before, #powerTip.w:after, #powerTip.w:before, #powerTip.e:after, #powerTip.e:before, #powerTip.ne:after, #powerTip.ne:before, #powerTip.se:after, #powerTip.se:before, #powerTip.nw:after, #powerTip.nw:before, #powerTip.sw:after, #powerTip.sw:before { border: solid transparent; content: " "; height: 0; width: 0; position: absolute; } #powerTip.n:after, #powerTip.s:after, #powerTip.w:after, #powerTip.e:after, #powerTip.nw:after, #powerTip.ne:after, #powerTip.sw:after, #powerTip.se:after { border-color: rgba(255, 255, 255, 0); } #powerTip.n:before, #powerTip.s:before, #powerTip.w:before, #powerTip.e:before, #powerTip.nw:before, #powerTip.ne:before, #powerTip.sw:before, #powerTip.se:before { border-color: rgba(128, 128, 128, 0); } #powerTip.n:after, #powerTip.n:before, #powerTip.ne:after, #powerTip.ne:before, #powerTip.nw:after, #powerTip.nw:before { top: 100%; } #powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { border-top-color: #ffffff; border-width: 10px; margin: 0px -10px; } #powerTip.n:before { border-top-color: #808080; border-width: 11px; margin: 0px -11px; } #powerTip.n:after, #powerTip.n:before { left: 50%; } #powerTip.nw:after, #powerTip.nw:before { right: 14px; } #powerTip.ne:after, #powerTip.ne:before { left: 14px; } 
#powerTip.s:after, #powerTip.s:before, #powerTip.se:after, #powerTip.se:before, #powerTip.sw:after, #powerTip.sw:before { bottom: 100%; } #powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { border-bottom-color: #ffffff; border-width: 10px; margin: 0px -10px; } #powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { border-bottom-color: #808080; border-width: 11px; margin: 0px -11px; } #powerTip.s:after, #powerTip.s:before { left: 50%; } #powerTip.sw:after, #powerTip.sw:before { right: 14px; } #powerTip.se:after, #powerTip.se:before { left: 14px; } #powerTip.e:after, #powerTip.e:before { left: 100%; } #powerTip.e:after { border-left-color: #ffffff; border-width: 10px; top: 50%; margin-top: -10px; } #powerTip.e:before { border-left-color: #808080; border-width: 11px; top: 50%; margin-top: -11px; } #powerTip.w:after, #powerTip.w:before { right: 100%; } #powerTip.w:after { border-right-color: #ffffff; border-width: 10px; top: 50%; margin-top: -10px; } #powerTip.w:before { border-right-color: #808080; border-width: 11px; top: 50%; margin-top: -11px; } @media print { #top { display: none; } #side-nav { display: none; } #nav-path { display: none; } body { overflow:visible; } h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } .summary { display: none; } .memitem { page-break-inside: avoid; } #doc-content { margin-left:0 !important; height:auto !important; width:auto !important; overflow:inherit; display:inline; } }
cutlass/docs/doxygen.css/0
{ "file_path": "cutlass/docs/doxygen.css", "repo_id": "cutlass", "token_count": 12388 }
1
var searchData= [ ['gemmkind',['GemmKind',['../namespacecutlass_1_1library.html#a8a2c782ab9bf9e19f99fdfcaf7f1c182',1,'cutlass::library']]] ];
cutlass/docs/search/enums_2.js/0
{ "file_path": "cutlass/docs/search/enums_2.js", "repo_id": "cutlass", "token_count": 69 }
2
var searchData= [ ['identity',['Identity',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92aa08adbc111dc827d524a5509c948d8ba6',1,'cutlass::Distribution']]], ['invalid',['Invalid',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92aa2ff14122c59a823654b84764f68e597b',1,'cutlass::Distribution']]] ];
cutlass/docs/search/enumvalues_1.js/0
{ "file_path": "cutlass/docs/search/enumvalues_1.js", "repo_id": "cutlass", "token_count": 154 }
3
var searchData= [ ['semaphore_2eh',['semaphore.h',['../semaphore_8h.html',1,'']]], ['shared_5fload_5fiterator_2eh',['shared_load_iterator.h',['../shared__load__iterator_8h.html',1,'']]], ['simd_2eh',['simd.h',['../simd_8h.html',1,'']]], ['simd_5fsm60_2eh',['simd_sm60.h',['../simd__sm60_8h.html',1,'']]], ['simd_5fsm61_2eh',['simd_sm61.h',['../simd__sm61_8h.html',1,'']]], ['simt_5fpolicy_2eh',['simt_policy.h',['../simt__policy_8h.html',1,'']]], ['subbyte_5freference_2eh',['subbyte_reference.h',['../subbyte__reference_8h.html',1,'']]] ];
cutlass/docs/search/files_10.js/0
{ "file_path": "cutlass/docs/search/files_10.js", "repo_id": "cutlass", "token_count": 266 }
4
var searchData= [ ['gemm_5fpipelined_2eh',['gemm_pipelined.h',['../gemm__pipelined_8h.html',1,'']]], ['gemv_2eh',['gemv.h',['../gemv_8h.html',1,'']]], ['gemv_5fbatched_5fstrided_2eh',['gemv_batched_strided.h',['../gemv__batched__strided_8h.html',1,'']]], ['mma_2eh',['mma.h',['../gemm_2thread_2mma_8h.html',1,'']]], ['mma_2eh',['mma.h',['../gemm_2warp_2mma_8h.html',1,'']]], ['mma_5fsm50_2eh',['mma_sm50.h',['../gemm_2thread_2mma__sm50_8h.html',1,'']]], ['mma_5fsm60_2eh',['mma_sm60.h',['../gemm_2thread_2mma__sm60_8h.html',1,'']]], ['mma_5fsm61_2eh',['mma_sm61.h',['../gemm_2thread_2mma__sm61_8h.html',1,'']]], ['threadblock_5fswizzle_2eh',['threadblock_swizzle.h',['../gemm_2threadblock_2threadblock__swizzle_8h.html',1,'']]] ];
cutlass/docs/search/files_6.js/0
{ "file_path": "cutlass/docs/search/files_6.js", "repo_id": "cutlass", "token_count": 378 }
5
var searchData= [ ['pitch_5flinear_2eh',['pitch_linear.h',['../pitch__linear_8h.html',1,'']]], ['pitch_5flinear_5fthread_5fmap_2eh',['pitch_linear_thread_map.h',['../pitch__linear__thread__map_8h.html',1,'']]], ['platform_2eh',['platform.h',['../platform_8h.html',1,'']]], ['predicate_5fvector_2eh',['predicate_vector.h',['../predicate__vector_8h.html',1,'']]], ['predicated_5ftile_5faccess_5fiterator_2eh',['predicated_tile_access_iterator.h',['../predicated__tile__access__iterator_8h.html',1,'']]], ['predicated_5ftile_5faccess_5fiterator_5f2dthreadtile_2eh',['predicated_tile_access_iterator_2dthreadtile.h',['../predicated__tile__access__iterator__2dthreadtile_8h.html',1,'']]], ['predicated_5ftile_5fiterator_5f2dthreadtile_2eh',['predicated_tile_iterator_2dthreadtile.h',['../predicated__tile__iterator__2dthreadtile_8h.html',1,'']]] ];
cutlass/docs/search/files_e.js/0
{ "file_path": "cutlass/docs/search/files_e.js", "repo_id": "cutlass", "token_count": 349 }
6
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Template for device-level Implicit GEMM */ #pragma once #include <limits> #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/conv/convolution.h" #include "kernel/b2b_implicit_gemm_convolution.h" #include "kernel/default_b2b_conv2d_fprop.h" #include "kernel/default_b2b_conv2d_fprop_sm75.h" #include "kernel/default_b2b_conv2d_fprop_sm80.h" #include "kernel/default_b2b_conv2d_fprop_smem_accumulator_sm75.h" #include "kernel/default_b2b_conv2d_fprop_smem_accumulator_sm80.h" namespace cutlass { namespace conv { namespace device { template<typename B2bImplicitGemmKernel_> class B2bImplicitGemmConvolution { public: using B2bImplicitGemmKernel = B2bImplicitGemmKernel_; using ElementA = typename B2bImplicitGemmKernel::ElementA; using LayoutA = typename B2bImplicitGemmKernel::LayoutA; using ElementB = typename B2bImplicitGemmKernel::ElementB; using LayoutB = typename B2bImplicitGemmKernel::LayoutB; using ElementC = typename B2bImplicitGemmKernel::ElementC; using LayoutC = typename B2bImplicitGemmKernel::LayoutC; using ElementAccumulator = typename B2bImplicitGemmKernel::ElementAccumulator; using ElementCompute = typename B2bImplicitGemmKernel::ElementCompute; using ElementScaleBias = typename B2bImplicitGemmKernel::ElementScaleBias; using LayoutScaleBias = typename B2bImplicitGemmKernel::LayoutScaleBias; using OperatorClass = typename B2bImplicitGemmKernel::OperatorClass; using ArchTag = typename B2bImplicitGemmKernel::ArchTag; using ThreadblockShape0 = typename B2bImplicitGemmKernel::ThreadblockShape0; using ThreadblockShape1 = typename B2bImplicitGemmKernel::ThreadblockShape1; using WarpShape0 = typename B2bImplicitGemmKernel::WarpShape0; using WarpShape1 = typename 
B2bImplicitGemmKernel::WarpShape1; using InstructionShape = typename B2bImplicitGemmKernel::InstructionShape; using ThreadblockSwizzle = typename B2bImplicitGemmKernel::ThreadblockSwizzle; using EpilogueOutputOp0 = typename B2bImplicitGemmKernel::EpilogueOutputOp0; using EpilogueOutputOp1 = typename B2bImplicitGemmKernel::EpilogueOutputOp1; static int const kStages = B2bImplicitGemmKernel::kStages; static int const kConvDim = B2bImplicitGemmKernel::kConvDim; using WarpMmaOperator0 = typename B2bImplicitGemmKernel::WarpMmaOperator0; using WarpMmaOperator1 = typename B2bImplicitGemmKernel::WarpMmaOperator1; using ArchMmaOperator = typename B2bImplicitGemmKernel::ArchMmaOperator; using MathOperator = typename B2bImplicitGemmKernel::MathOperator; static cutlass::conv::Operator const kConvolutionalOperator = B2bImplicitGemmKernel::kConvolutionalOperator; static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = B2bImplicitGemmKernel::kIteratorAlgorithm; static int const kWarpCount = (ThreadblockShape0::kM / WarpShape0::kM) * (ThreadblockShape0::kN / WarpShape0::kN); /// Argument structure using Arguments = typename B2bImplicitGemmKernel::Arguments; private: /// Kernel parameters object typename B2bImplicitGemmKernel::Params params_; public: /// Constructs Implicit GEMM B2bImplicitGemmConvolution() { } /// Determines whether the Implicit GEMM can execute the given problem. static Status can_implement(Arguments const &args) { // dispatch to iterators Status status = B2bImplicitGemmKernel::B2bMma::IteratorA0::can_implement(args.problem_size_0); if (Status::kSuccess != status) { return status; } status = B2bImplicitGemmKernel::B2bMma::IteratorB0::can_implement(args.problem_size_0); if (Status::kSuccess != status) { return status; } status = B2bImplicitGemmKernel::B2bMma::IteratorB1::can_implement(args.problem_size_1); if (Status::kSuccess != status) { return status; } // Determine grid shape ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape( threadblock_swizzle.get_tiled_shape( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_0), {ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK}, args.problem_size_0.split_k_slices)); if (!(grid.y <= std::numeric_limits<uint16_t>::max() && grid.z <= std::numeric_limits<uint16_t>::max())) { return Status::kErrorInvalidProblem; } // Determine if fusion sizes are valid cutlass::gemm::GemmCoord problem_size_0 = implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_0); cutlass::gemm::GemmCoord problem_size_1 = implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_1); if(problem_size_0.m() != problem_size_1.m()) return Status::kErrorInvalidProblem; if(problem_size_0.n() != problem_size_1.k()) return Status::kErrorInvalidProblem; if(args.problem_size_1.R != 1 || args.problem_size_1.S != 1) return Status::kErrorInvalidProblem; if(problem_size_0.n() > ThreadblockShape0::kN) return Status::kErrorInvalidProblem; if(problem_size_1.n() > ThreadblockShape1::kN) return Status::kErrorInvalidProblem; return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t workspace_bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_0), {ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK}, 
args.problem_size_0.split_k_slices); if(args.split_k_mode == SplitKMode::kParallel) { // Split-K parallel: CTAs in k-dimension write the partial results in a temporary workspace. // The user needs to call a reduction operator to obtain the final output tensor workspace_bytes = sizeof(ElementAccumulator) * size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size_0)) * size_t(grid_tiled_shape.k()); } else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size_0.split_k_slices > 1) { // Split-K serial: The user workspace is used to store semaphore and serialize writing the // final reduced output to user's output tensor workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n()); } return workspace_bytes; } /// Initializes GEMM state from arguments. Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { if (args.problem_size_0.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream); if (status != cudaSuccess) { return Status::kErrorInternal; } } // initialize the params structure from the arguments params_ = typename B2bImplicitGemmKernel::Params( args, static_cast<int *>(workspace) ); int smem_size = int(sizeof(typename B2bImplicitGemmKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<B2bImplicitGemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Updates GEMM state from the arguments. Status update(Arguments const &args, void *workspace = nullptr) { // update the params structure from the arguments params_.ptr_A0 = args.ref_A0.data(); params_.ptr_B0 = args.ref_B0.data(); params_.ptr_C0 = args.ref_C0.data(); params_.ptr_Scale0 = args.ref_Scale0.data(); params_.ptr_Bias0 = args.ref_Bias0.data(); params_.ptr_B1 = args.ref_B1.data(); params_.ptr_C1 = args.ref_C1.data(); params_.ptr_D1 = args.ref_D1.data(); params_.output_op_0 = args.output_op_0; params_.output_op_1 = args.output_op_1; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(32 * kWarpCount, 1, 1); int smem_size = int(sizeof(typename B2bImplicitGemmKernel::SharedStorage)); cutlass::Kernel<B2bImplicitGemmKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Initializes from the arguments and then runs the kernel. 
Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/13_two_tensor_op_fusion/device/b2b_implicit_gemm_convolution.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/device/b2b_implicit_gemm_convolution.h", "repo_id": "cutlass", "token_count": 4115 }
7
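The device-level wrapper above reduces host-side usage to four calls: a static can_implement() check that validates the fusion constraints (matching GEMM extents between the two convolutions and a 1x1 second filter), a static get_workspace_size() query, initialize(), and run(). The sketch below is illustrative only and is not part of the repository: it assumes the caller has already composed a concrete B2bImplicitGemmConvolution specialization (the template parameter B2bConv and the helper name run_b2b_fprop are hypothetical) and has populated the corresponding Arguments structure; the include paths follow the layout of this example directory.

// Minimal host-side usage sketch (assumption: B2bConv is a fully specified
// cutlass::conv::device::B2bImplicitGemmConvolution<...> type and `args` was
// filled in by the caller with tensors, problem sizes, and epilogue scalars).
#include "cutlass/cutlass.h"
#include "cutlass/util/device_memory.h"
#include "device/b2b_implicit_gemm_convolution.h"

template <typename B2bConv>
cutlass::Status run_b2b_fprop(typename B2bConv::Arguments const &args, cudaStream_t stream = nullptr) {

  // Reject problems the fused kernel cannot execute (e.g. mismatched GEMM
  // shapes between the two stages or a second filter that is not 1x1).
  cutlass::Status status = B2bConv::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // Allocate whatever split-K workspace the kernel reports it needs.
  size_t workspace_bytes = B2bConv::get_workspace_size(args);
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);

  // Build the kernel parameters, then launch on the requested stream.
  B2bConv conv_op;
  status = conv_op.initialize(args, workspace.get(), stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  return conv_op.run(stream);
}

The same sequence is what the convenience overload operator()(Arguments const &, void *, cudaStream_t) performs internally, so callers that do not need to reuse the initialized state can invoke the operator directly.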
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** The example demonstrates how to reduce one of the operands of the GEMM along the k-dimension when computing GEMM. So the output also contains either a Mx1 or 1XN vector. It only works with Ampere 16x8x16 FP16/BF16 tensor cores, though it is not difficult to apply to other Turing/Ampere tensor core instructions. 
Most of the reduction is done in gemm/warp level, see gemm/warp/mma_with_reduction_tensor_op.h A few bit of reduction is done in the epilogue before storing the vector, see epilogue/threadblock/epilogue_gemm_k_reduction.h */ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_with_k_reduction.h" #include "cutlass/gemm/kernel/default_gemm_with_k_reduction.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/kernel/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/matrix_coord.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/convolution.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation using ElementInputA = cutlass::bfloat16_t; // Data type of elements in input tensor using ElementInputB = cutlass::bfloat16_t; // Data type of elements in input tensor using ElementOutput = cutlass::bfloat16_t; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::ColumnMajor; // Layout of the output vector using LayoutGemmKReduction = cutlass::layout::PitchLinear; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>; // Number of pipelines you want to use constexpr int NumStages = 4; // Reduce A or B operand along the K dimension constexpr bool ReduceKForA = true; // Alignment of A operand constexpr int AlignmentA = 8; // Alignment of B operand constexpr int AlignmentB = 8; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; using Gemm = typename cutlass::gemm::device::GemmWithKReduction< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, ReduceKForA, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, AlignmentA, AlignmentB, cutlass::arch::OpMultiplyAdd, cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone >; // Below is the reduction kernel used in the case of parallel split-k using ReduceGemmSplitKShape = cutlass::MatrixShape<4, 64>; using ReduceOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, ElementOutput, EpilogueOp::kCount >; using ReduceGemmSplitKKernel = cutlass::reduction::kernel::ReduceSplitK< ReduceGemmSplitKShape, EpilogueOp, ReduceOp >; using ReduceGemmSplitK = cutlass::reduction::device::ReduceSplitK<ReduceGemmSplitKKernel>; using ReduceVectorSplitKShape = cutlass::MatrixShape<1, 256>; // This code section describes the epilogue part of the kernel, we use default value using DummyEpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, cutlass::epilogue::thread::ScaleType::Nothing>; using ReduceVectorSplitKKernel = cutlass::reduction::kernel::ReduceSplitK< ReduceVectorSplitKShape, DummyEpilogueOp, ReduceOp >; using ReduceVectorSplitK = cutlass::reduction::device::ReduceSplitK<ReduceVectorSplitKKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; int split_k_slices; bool parallel_split_k; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), problem_size(1024, 1024, 1024), split_k_slices(1), parallel_split_k(false), reference_check(true), measure_performance(false), iterations(20), save_workspace(false), alpha(-1), beta(-1), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS GEMM implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::bfloat16_t (BF16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
// int const kAlignment = 8; if ((problem_size.m() % kAlignment) || (problem_size.n() % kAlignment) || (problem_size.k() % kAlignment)) { // misaligned tensors return false; } return true; } /// Updates input and filter sizes void update( cutlass::gemm::GemmCoord problem_size, int split_k_slices, bool parallel_split_k) { this->problem_size = problem_size; this->split_k_slices = split_k_slices; this->parallel_split_k = parallel_split_k; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("parallel-split-k")) { parallel_split_k = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("split-k-slices", split_k_slices); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "23_ampere_operand_gemm_reduction_fusion\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M\n" << " --n=<int> GEMM N\n" << " --k=<int> GEMM K\n" << " --split-k-slices=<int> Split K Slices\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --parallel-split-k If set (true), use parallel split K\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several problem sizes.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/23_ampere_gemm_operand_reduction_fusion/23_ampere_gemm_operand_reduction_fusion --m=1024 --n=1024 --k=1024 \n\n"; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "ID,M,N,K,SplitK-Slices,Parallel-SplitK,Runtime"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "gemm_" << idx << "," << options.problem_size.m() << "," << options.problem_size.n() << "," << options.problem_size.k() << "," << options.split_k_slices << "," << options.parallel_split_k << "," << runtime_ms ; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile(Options const &options) { Result result; // 
Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.problem_size.mk()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.problem_size.kn()); // Create tensor C with dimensions 1x1x1xk which is the bias vector cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.problem_size.mn()); // Create tensor D used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.problem_size.mn()); // Create matrix D with dimensions M x N used to store output from reference // kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.problem_size.mn()); int reduce_vector_length = ReduceKForA ? options.problem_size.m() : options.problem_size.n(); cutlass::HostTensor<ElementOutput, LayoutGemmKReduction> tensor_reduction({reduce_vector_length, 1}); cutlass::HostTensor<ElementOutput, LayoutGemmKReduction> tensor_ref_reduction({reduce_vector_length, 1}); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1997, ElementInputA(1), ElementInputA(-1), 0); // <- Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 2003, ElementInputB(1), ElementInputB(-1), 0); // <- Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 2017, ElementOutput(1), ElementOutput(-1), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros cutlass::reference::host::TensorFill( tensor_reduction.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_reduction.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); tensor_reduction.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = options.parallel_split_k ? ElementComputeEpilogue(1) : ElementComputeEpilogue(options.alpha); ElementComputeEpilogue beta = options.parallel_split_k ? ElementComputeEpilogue(0) : ElementComputeEpilogue(options.beta); cutlass::gemm::GemmUniversalMode mode = options.parallel_split_k ? cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel : cutlass::gemm::GemmUniversalMode::kGemm; int batch_count = options.split_k_slices; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments( mode, options.problem_size, batch_count, {alpha, beta}, tensor_a.device_ref().data(), // <- reference to tensor A on device tensor_b.device_ref().data(), // <- reference to tensor B on device tensor_c.device_ref().data(), // <- reference to matrix C on device tensor_d.device_ref().data(), // <- reference to matrix D on device tensor_reduction.device_ref().data(), // <- reference to reduction tensor on device options.problem_size.m() * options.problem_size.k(), options.problem_size.n() * options.problem_size.k(), options.problem_size.m() * options.problem_size.n(), options.problem_size.m() * options.problem_size.n(), reduce_vector_length, tensor_a.layout().stride(0), tensor_b.layout().stride(0), tensor_c.layout().stride(0), tensor_d.layout().stride(0), tensor_reduction.layout().stride(0)); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not result.status = gemm_op.can_implement(arguments); CUTLASS_CHECK(result.status); // Initialize CUTLASS kernel with arguments and workspace pointer result.status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // Launch initialized CUTLASS kernel result.status = gemm_op(); CUTLASS_CHECK(result.status); if (options.parallel_split_k && batch_count > 1) { // reduce gemm ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha); ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta); int splitk_gemm_stride = options.problem_size.m(); cutlass::layout::RowMajor splitk_gemm_layout(splitk_gemm_stride); void * workspace_gemm_ptr = workspace.get(); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> workspace_gemm_tensorref(static_cast<ElementOutput *>(workspace_gemm_ptr), splitk_gemm_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_d_tensorref(tensor_d.device_ref().data(), splitk_gemm_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_c_tensorref(tensor_c.device_ref().data(), splitk_gemm_layout); typename ReduceGemmSplitK::Arguments reduce_gemm_splitk_arguments{ cutlass::MatrixCoord(options.problem_size.n(), options.problem_size.m()), batch_count, size_t(options.problem_size.m() * options.problem_size.n()), workspace_gemm_tensorref, tensor_d_tensorref, tensor_c_tensorref, {alpha, beta} }; ReduceGemmSplitK reduce_gemm_splitk_op; result.status = reduce_gemm_splitk_op.initialize(reduce_gemm_splitk_arguments); CUTLASS_CHECK(result.status); result.status = reduce_gemm_splitk_op(); CUTLASS_CHECK(result.status); // reduce k vector cutlass::layout::RowMajor splitk_vector_layout(reduce_vector_length); ElementOutput *workspace_vector_ptr = static_cast<ElementOutput *>(workspace_gemm_ptr) + batch_count * options.problem_size.m() * options.problem_size.n(); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> workspace_vector_tensorref(workspace_vector_ptr, splitk_vector_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_reduction_tensorref(tensor_reduction.device_ref().data(), splitk_vector_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_nullptr_tensorref(nullptr, 
splitk_vector_layout); typename ReduceVectorSplitK::Arguments reduce_vector_splitk_arguments( cutlass::MatrixCoord(1, reduce_vector_length), batch_count, size_t(reduce_vector_length), workspace_vector_tensorref, tensor_reduction_tensorref, tensor_nullptr_tensorref, {1.0f, 0.0f}); ReduceVectorSplitK reduce_vector_splitk_op; result.status = reduce_vector_splitk_op.initialize(reduce_vector_splitk_arguments); CUTLASS_CHECK(result.status); result.status = reduce_vector_splitk_op(); CUTLASS_CHECK(result.status); } // // Create instantiation for device reference conv kernel // if (options.reference_check) { // Launch device reference to compute strictly the product A * B cutlass::reference::device::Gemm< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator> gemm_device; gemm_device ( options.problem_size, ElementComputeEpilogue(options.alpha), tensor_a.device_ref(), tensor_b.device_ref(), ElementComputeEpilogue(options.beta), tensor_c.device_ref(), tensor_ref_d.device_ref() ); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); tensor_reduction.sync_host(); // Reduce K in host code if (ReduceKForA) { for (int m = 0; m < options.problem_size.m(); ++m) { for (int k = 0; k < options.problem_size.k(); ++k) { tensor_ref_reduction.at({m, 0}) += tensor_a.at(cutlass::MatrixCoord(m, k)); } } } else { for (int k = 0; k < options.problem_size.k(); ++k) { for (int n = 0; n < options.problem_size.n(); ++n) { tensor_ref_reduction.at({n, 0}) += tensor_b.at(cutlass::MatrixCoord(k, n)); } } } // Check if output from CUTLASS kernel and reference kernel are equal or not bool pass = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); pass &= cutlass::reference::host::TensorEquals(tensor_ref_reduction.host_view(), tensor_reduction.host_view()); if (!pass) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "23_ampere_gemm_operand_reduction_fusion" << options.problem_size.m() << "x" << options.problem_size.n() << "x" << options.problem_size.k() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "A = \n" << tensor_a.host_view() << "\n\n" << "B = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference D = \n" << tensor_ref_d.host_view() << "\n\n"; output_workspace << "Reference reduction vector = \n" << tensor_ref_reduction.host_view() << "\n\n"; } output_workspace << "Computed D = \n" << tensor_d.host_view() << std::endl; output_workspace << "Computed reduction vector = \n" << tensor_reduction.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. 
result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the GEMMs have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsedTime() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute the average runtime. result.runtime_ms = double(runtime_ms) / double(options.iterations); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with the CUDA 11 Toolkit or later to run this example. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major >= 8)) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several problem sizes struct Benchmark { int m, n, k, split_k_slices, parallel_split_k; } problem_sizes[] = { {4096, 6144, 4096, 1, false}, }; Result::print_header(std::cout, options) << "\n"; int idx = 1; for (auto const &problem_size : problem_sizes) { options.update({problem_size.m, problem_size.n, problem_size.k}, problem_size.split_k_slices, problem_size.parallel_split_k); Result result = profile(options); result.print(std::cout, idx, options) << "\n"; ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << "\n"; return -1; } Result result = profile(options); Result::print_header(std::cout, options) << "\n"; result.print(std::cout, 1, options) << "\n"; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/23_ampere_gemm_operand_reduction_fusion/ampere_gemm_operand_reduction_fusion.cu/0
{ "file_path": "cutlass/examples/23_ampere_gemm_operand_reduction_fusion/ampere_gemm_operand_reduction_fusion.cu", "repo_id": "cutlass", "token_count": 10387 }
8
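For orientation, the listing above fuses a GEMM with a reduction of one operand over its K dimension: the host check sums A's rows (length M) when ReduceKForA is set, or B's columns (length N) otherwise. The following is a minimal NumPy sketch of that reference computation; it is not part of the CUTLASS sources and all names are illustrative.

import numpy as np

def reference_gemm_with_operand_reduction(A, B, C, alpha=1.0, beta=0.0, reduce_k_for_a=True):
    # Usual GEMM epilogue result.
    D = alpha * (A @ B) + beta * C
    # Fused reduction: per-row sum of A over K, or per-column sum of B over K.
    reduction = A.sum(axis=1) if reduce_k_for_a else B.sum(axis=0)
    return D, reduction

# Small smoke test mirroring the M, N, K naming used by the example's Options.
M, N, K = 64, 32, 128
A = np.random.randn(M, K).astype(np.float32)
B = np.random.randn(K, N).astype(np.float32)
C = np.zeros((M, N), dtype=np.float32)
D, red = reference_gemm_with_operand_reduction(A, B, C)
assert D.shape == (M, N) and red.shape == (M,)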
################################################################################ # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ Basic example of using the CUTLASS Python interface to run a grouped GEMM """ import sys print("This example is deprecated. Please see examples/python for examples of using " "the CUTLASS Python interface.") sys.exit(0) import argparse import numpy as np import cutlass_bindings import cutlass.backend as pycutlass from cutlass.backend import * from cutlass.backend.utils.device import device_cc parser = argparse.ArgumentParser(description="Launch a grouped GEMM kernel from Python") parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel") try: args = parser.parse_args() except: sys.exit(0) # Check that the device is of a sufficient compute capability cc = device_cc() assert cc >= 70, "The CUTLASS Python grouped GEMM example requires compute capability greater than or equal to 70." 
np.random.seed(0) # Allocate a pool of device memory to be used by the kernel pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32) # Set the compiler to use to NVCC pycutlass.compiler.nvcc() # Set up A, B, C and accumulator alignment = 1 A = TensorDescription(cutlass_bindings.float16, cutlass_bindings.ColumnMajor, alignment) B = TensorDescription(cutlass_bindings.float16, cutlass_bindings.RowMajor, alignment) C = TensorDescription(cutlass_bindings.float32, cutlass_bindings.ColumnMajor, alignment) element_acc = cutlass_bindings.float32 element_epilogue = cutlass_bindings.float32 # Select instruction shape based on the Tensor Core instructions supported # by the device on which we are running if cc == 70: instruction_shape = [8, 8, 4] elif cc == 75: instruction_shape = [16, 8, 8] else: # Use CUTLASS kernels for CC 80 by default (e.g., for cases in which SM86 is used) cc = 80 instruction_shape = [16, 8, 16] math_inst = MathInstruction( instruction_shape, A.element, B.element, element_acc, cutlass_bindings.OpClass.TensorOp, MathOperation.multiply_add ) tile_description = TileDescription( [128, 128, 32], # Threadblock shape 2, # Number of stages [2, 2, 1], # Number of warps within each dimension of the threadblock shape math_inst ) epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue) operation = GemmOperationGrouped( arch=cc, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor, precompute_mode=SchedulerMode.Device) if args.print_cuda: print(operation.rt_module.emit()) operations = [operation, ] # Compile the operation pycutlass.compiler.add_module(operations) # Initialize tensors for each problem in the group problem_sizes = [ cutlass_bindings.gemm.GemmCoord(128, 128, 64), cutlass_bindings.gemm.GemmCoord(512, 256, 128) ] problem_count = len(problem_sizes) alpha = 1. beta = 0. tensor_As = [] tensor_Bs = [] tensor_Cs = [] tensor_Ds = [] tensor_D_refs = [] reference = ReferenceModule(A, B, C) for problem_size in problem_sizes: # Randomly initialize tensors m = problem_size.m() n = problem_size.n() k = problem_size.k() tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(m * k,))).astype(np.float16) tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(k * n,))).astype(np.float16) tensor_C = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(m * n,))).astype(np.float32) tensor_D = np.zeros(shape=(m * n,)).astype(np.float32) tensor_As.append(tensor_A) tensor_Bs.append(tensor_B) tensor_Cs.append(tensor_C) tensor_Ds.append(tensor_D) # Run the reference GEMM tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta) tensor_D_refs.append(tensor_D_ref) arguments = GemmGroupedArguments( operation, problem_sizes, tensor_As, tensor_Bs, tensor_Cs, tensor_Ds, output_op=operation.epilogue_type(alpha, beta) ) # Run the operation operation.run(arguments) arguments.sync() # Compare the CUTLASS result to the host reference result for tensor_d, tensor_d_ref in zip(tensor_Ds, tensor_D_refs): try: assert np.array_equal(tensor_d, tensor_d_ref) except: assert np.allclose(tensor_d, tensor_d_ref, rtol=1e-5) print("Passed.")
cutlass/examples/40_cutlass_py/gemm_grouped.py/0
{ "file_path": "cutlass/examples/40_cutlass_py/gemm_grouped.py", "repo_id": "cutlass", "token_count": 2081 }
9
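To make the layout handling in the grouped GEMM example above concrete, here is a small NumPy sketch of the per-problem reference check it performs, assuming flat 1-D inputs with A column-major, B row-major, and C/D column-major as configured in the example. The helper and its argument shapes are illustrative, not part of the CUTLASS Python API.

import numpy as np

def grouped_gemm_reference(problem_sizes, As, Bs, Cs, alpha=1.0, beta=0.0):
    # problem_sizes: list of (m, n, k) tuples; As/Bs/Cs: flat 1-D arrays per problem.
    Ds = []
    for (m, n, k), a, b, c in zip(problem_sizes, As, Bs, Cs):
        A = a.reshape(k, m).T.astype(np.float32)  # unflatten column-major (m x k)
        B = b.reshape(k, n).astype(np.float32)    # unflatten row-major (k x n)
        C = c.reshape(n, m).T                     # unflatten column-major (m x n)
        D = alpha * (A @ B) + beta * C
        Ds.append(D.flatten(order="F"))           # re-flatten column-major
    return Ds

Each returned vector can then be compared against the corresponding kernel output with np.allclose, which is essentially what the example's reference check does per group.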
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import helper indentation = " " def append_word(word): code = "" code += word code += " " return code def gen_namespace(namespace, codeBody): code_gen = "namespace " + namespace + " {\n" code_gen += codeBody code_gen += "} // namespace " + namespace + "\n" return code_gen def gen_expression(type, lval, rval = None): code_gen = "" code_gen += append_word(type) code_gen += append_word(lval) if rval is not None: code_gen += append_word("=") code_gen += append_word(rval) return code_gen def gen_class(name, codeBody, inheritance_code = None): code_gen = "" if inheritance_code is None: code_gen = "class " + name + "{\n" else: code_gen = "class " + name + " : "+ inheritance_code + "{\n" code_gen += codeBody code_gen += "}; // class " + name + "\n" return code_gen def gen_struct(name, codeBody, specialized = None): specialized_code = "" if specialized is not None: specialized_code = "<" + specialized + ">" code_gen = "struct " + name + specialized_code + "{\n" code_gen += codeBody code_gen += "}; // struct " + name + "\n" return code_gen def gen_template_arg(arg_type, arg_name, default_val = None): rval = None if default_val is not None: rval = str(default_val) arg_typename = "" if arg_type is int: arg_typename = "int" elif arg_type is bool: arg_typename = "bool" else: arg_typename = "typename" internal_arg_name = arg_name + "_" code_gen = indentation code_gen += gen_expression(arg_typename, internal_arg_name, rval) return code_gen def gen_template_args(args, set_default = True): arg_len = len(args) cnt = 1 code_gen = "" for arg_tuple in args: arg_type = arg_tuple[0] arg_name = arg_tuple[1] arg_default_val = None if len(arg_tuple) == 3 and set_default: arg_default_val = arg_tuple[2] code_gen += gen_template_arg(arg_type, arg_name, 
arg_default_val) if cnt != arg_len: code_gen += ",\n" cnt += 1 return code_gen def gen_template_head(args, set_default = True): code_gen = "template <\n" code_gen += gen_template_args(args, set_default) code_gen += ">\n" return code_gen def export_template_args(args): code_gen = "public:\n" for arg_tuple in args: code_gen += indentation arg_type = arg_tuple[0] arg_name = arg_tuple[1] internal_arg_name = arg_name + "_" typename = "" if arg_type is int: typename = "static int const" elif arg_type is bool: typename = "static bool const" else: typename = "using" code_gen += gen_expression(typename, arg_name, internal_arg_name) code_gen += ";\n" return code_gen def gen_template_class(class_name, args, codeBody, set_default = True, inheritance_code = None): code_gen = "" code_gen += gen_template_head(args, set_default) code_gen += gen_class(class_name, export_template_args(args) + codeBody, inheritance_code) return code_gen def gen_template_struct(struct_name, args, codeBody, speicalized = None, set_default = True, export_args = True): code_gen = "" code_gen += gen_template_head(args, set_default) code = export_template_args(args) + codeBody if export_args is False: code = codeBody code_gen += gen_struct(struct_name, code , speicalized) return code_gen def gen_declare_template_struct(name, *params): code = name + "<" cnt = 0 param_num = len(params) for param in params: final = ", " if cnt == param_num - 1: final = "" code += param + final cnt += 1 code += ">;\n" return code def filtered_param(params, name_and_value_pair, keep_ = False): rtn_template_args = [] speicalized_template_args = [] for param in params: param_name = "" if len(param) >= 1: param_name = param[1] else: param_name = param[0] hit_flag = False set_value = "" for n_v_pair in name_and_value_pair: filter_name = n_v_pair[0] set_value = n_v_pair[1] if param_name == (filter_name + "_") or param_name == filter_name : hit_flag = True break if hit_flag is False: rtn_template_args.append(param) if hit_flag is True: speicalized_template_args.append(set_value) else: if keep_ is True: speicalized_template_args.append(param_name + "_") else: speicalized_template_args.append(param_name) specialized_template_arg_str = helper.list_2_string(speicalized_template_args) return rtn_template_args, specialized_template_arg_str def gen_func(func_name, arg_lists, code_body, only_declare = False, with_cudaStream = True): code = "void " + func_name + "(\n" for arg in arg_lists: arg_tp = arg[0] arg_nm = arg[1] code += " " + arg_tp + " " + arg_nm + ",\n" code += "cudaStream_t stream)" if only_declare : return code code += "{\n" code += code_body + "\n" code += "}\n" return code def indent_level(code, level = 0): rtn_code = "" for i in range(level): rtn_code += " " rtn_code += code return rtn_code
cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_ir.py/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_ir.py", "repo_id": "cutlass", "token_count": 2983 }
10
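As a usage illustration for the code-generation helpers above, the snippet below is a hypothetical sketch (it assumes gen_ir.py and its sibling helper module are importable) that emits a templated C++ struct declaration from a parameter list.

import gen_ir  # the module defined above; needs its "helper" dependency on the path

# Template parameters: an int with a default value, and a typename.
params = [(int, "kThreads", 256), ("typename", "Element")]
body = gen_ir.indent_level("static void run();\n", 1)

# gen_template_struct prepends a template<...> head and re-exports the
# template arguments as public members of the generated struct.
print(gen_ir.gen_template_struct("MyOp", params, body))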
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/semaphore.h" #include "../threadblock/dual_mma_multistage.h" #include "../threadblock/dual_epilogue.h" #include "../dual_gemm_common.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename DualMma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue0_, ///! Epilogue typename Epilogue1_, ///! Epilogue typename OutputOp2_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function bool SplitKSerial, ///! If true, code supporting split-K via serial reduction is enabled. 
bool StoreD0, bool StoreD1 > struct DualGemm { using DualMma = DualMma_; using Epilogue0 = Epilogue0_; using Epilogue1 = Epilogue1_; using OutputOp0 = typename Epilogue0::OutputOp; using OutputOp1 = typename Epilogue1::OutputOp; using OutputOp2 = OutputOp2_; using ThreadblockSwizzle = ThreadblockSwizzle_; static constexpr bool kStoreD0 = StoreD0; static constexpr bool kStoreD1 = StoreD1; using DualEpilogue = cutlass::epilogue::threadblock::DualEpilogue< typename Epilogue0::Shape, typename Epilogue0::WarpMmaOperator, Epilogue0::kPartitionsK, typename Epilogue0::OutputTileIterator, typename Epilogue0::AccumulatorFragmentIterator, typename Epilogue0::WarpTileIterator, typename Epilogue0::SharedLoadIterator, OutputOp0, OutputOp1, OutputOp2, typename Epilogue0::Padding, kStoreD0, kStoreD1, Epilogue0::kFragmentsPerIteration, true // IterationsUnroll >; using ElementA = typename DualMma::IteratorA::Element; using ElementB = typename DualMma::IteratorB0::Element; using ElementC = typename DualEpilogue::OutputTileIterator::Element; static bool const kSplitKSerial = SplitKSerial; static_assert(!kSplitKSerial || (kStoreD0 && kStoreD1), "Split-K serial requires buffers for D0/D1 for reduction"); /// Warp count (concept: GemmShape) using WarpCount0 = typename DualMma::WarpCount; static int const kThreadCount = 32 * WarpCount0::kCount; /// Parameters structure struct Params { DualGemmMode mode; cutlass::gemm::GemmCoord problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; int swizzle_log_tile; // Mma0 typename DualMma::IteratorA::Params params_A0; typename DualMma::IteratorA::TensorRef ref_A0; typename DualMma::IteratorB0::Params params_B0; typename DualMma::IteratorB0::TensorRef ref_B0; typename Epilogue0::OutputTileIterator::Params params_C0; typename Epilogue0::OutputTileIterator::TensorRef ref_C0; typename Epilogue0::OutputTileIterator::Params params_D0; typename Epilogue0::OutputTileIterator::TensorRef ref_D0; typename OutputOp0::Params output_op_0; // Mma1 typename DualMma::IteratorB1::Params params_B1; typename DualMma::IteratorB1::TensorRef ref_B1; typename Epilogue1::OutputTileIterator::Params params_C1; typename Epilogue1::OutputTileIterator::TensorRef ref_C1; typename Epilogue1::OutputTileIterator::Params params_D1; typename Epilogue1::OutputTileIterator::TensorRef ref_D1; typename OutputOp1::Params output_op_1; typename Epilogue1::OutputTileIterator::Params params_D2; typename Epilogue1::OutputTileIterator::TensorRef ref_D2; typename OutputOp2::Params output_op_2; int *semaphore; int gemm_k_size; int64_t batch_stride_A; int64_t batch_stride_B0; int64_t batch_stride_B1; int64_t batch_stride_C; int64_t batch_stride_D; // // Methods // CUTLASS_HOST_DEVICE Params(): swizzle_log_tile(0), semaphore(0), gemm_k_size(0) { } CUTLASS_HOST_DEVICE Params( DualGemmMode mode, cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmCoord const & grid_tiled_shape, // Mma0: D0 = A @ B0 + C0 typename DualMma::IteratorA::TensorRef ref_A0, typename DualMma::IteratorB0::TensorRef ref_B0, typename Epilogue0::OutputTileIterator::TensorRef ref_C0, typename Epilogue0::OutputTileIterator::TensorRef ref_D0, // Mma1: D1 = A @ B1 + C1 typename DualMma::IteratorB1::TensorRef ref_B1, typename Epilogue1::OutputTileIterator::TensorRef ref_C1, typename Epilogue1::OutputTileIterator::TensorRef ref_D1, typename Epilogue1::OutputTileIterator::TensorRef ref_D2, typename OutputOp0::Params output_op_0 = typename OutputOp0::Params(), typename OutputOp1::Params output_op_1 = typename OutputOp1::Params(), typename 
OutputOp2::Params output_op_2 = typename OutputOp2::Params(), int *workspace = nullptr, int64_t batch_stride_A = 1, int64_t batch_stride_B0 = 1, int64_t batch_stride_B1 = 1, int64_t batch_stride_C = 1, int64_t batch_stride_D = 1 ): mode(mode), problem_size(problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), // Mma0 params_A0(ref_A0.layout()), ref_A0(ref_A0), params_B0(ref_B0.layout()), ref_B0(ref_B0), params_C0(ref_C0.layout()), ref_C0(ref_C0), params_D0(ref_D0.layout()), ref_D0(ref_D0), // Mma1 params_B1(ref_B1.layout()), ref_B1(ref_B1), params_C1(ref_C1.layout()), ref_C1(ref_C1), params_D1(ref_D1.layout()), ref_D1(ref_D1), params_D2(ref_D2.layout()), ref_D2(ref_D2), output_op_0(output_op_0), output_op_1(output_op_1), output_op_2(output_op_2), batch_stride_A(batch_stride_A), batch_stride_B0(batch_stride_B0), batch_stride_B1(batch_stride_B1), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D) { int total_gemm_k_iterations = (problem_size.k() + DualMma::Shape::kK - 1) / DualMma::Shape::kK; int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); gemm_k_size = gemm_k_iterations * DualMma::Shape::kK; semaphore = workspace; } }; /// Shared memory storage structure union SharedStorage { typename DualMma::SharedStorage main_loop; typename DualEpilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE DualGemm() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size, typename DualMma::IteratorA::TensorRef ref_A0, typename DualMma::IteratorB0::TensorRef ref_B0, typename Epilogue0::OutputTileIterator::TensorRef ref_C0, typename Epilogue0::OutputTileIterator::TensorRef ref_D0, typename DualMma::IteratorB1::TensorRef ref_B1, typename Epilogue1::OutputTileIterator::TensorRef ref_C1, typename Epilogue1::OutputTileIterator::TensorRef ref_D1, typename Epilogue1::OutputTileIterator::TensorRef ref_D2) { static int const kAlignmentA = DualMma::IteratorA::AccessType::kElements; static int const kAlignmentB = DualMma::IteratorB0::AccessType::kElements; static int const kAlignmentC = Epilogue0::OutputTileIterator::kElementsPerAccess; if (!TensorRef_aligned(ref_A0, kAlignmentA)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_B0, kAlignmentB)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_C0, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_D0, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_B1, kAlignmentB)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_C1, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_D1, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_D2, kAlignmentC)) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A0 = 
static_cast<ElementA *>(params.ref_A0.data()); ElementB *ptr_B0 = static_cast<ElementB *>(params.ref_B0.data()); ElementB *ptr_B1 = static_cast<ElementB *>(params.ref_B1.data()); // // Fetch pointers based on mode. // if (params.mode == DualGemmMode::kGemm) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == DualGemmMode::kBatched) { ptr_A0 += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B0 += threadblock_tile_offset.k() * params.batch_stride_B0; ptr_B1 += threadblock_tile_offset.k() * params.batch_stride_B1; } // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A0{ threadblock_tile_offset.m() * DualMma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B0{ offset_k, threadblock_tile_offset.n() * DualMma::Shape::kN }; cutlass::MatrixCoord tb_offset_B1{ offset_k, threadblock_tile_offset.n() * DualMma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename DualMma::IteratorA iterator_A0( params.params_A0, ptr_A0, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A0); typename DualMma::IteratorB0 iterator_B0( params.params_B0, ptr_B0, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B0); typename DualMma::IteratorB1 iterator_B1( params.params_B1, ptr_B1, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B1); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply typename DualMma::FragmentC accum0; typename DualMma::FragmentC accum1; accum0.clear(); accum1.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + DualMma::Shape::kK - 1) / DualMma::Shape::kK; DualMma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); if (!kSplitKSerial || gemm_k_iterations > 0) { // Compute threadblock-scoped matrix multiply-add mma(gemm_k_iterations, accum0, accum1, iterator_A0, iterator_B0, iterator_B1, accum0, accum1); } // // Epilogue // OutputOp0 output_op_0(params.output_op_0); OutputOp1 output_op_1(params.output_op_1); OutputOp2 output_op_2(params.output_op_2); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * DualMma::Shape::kM, threadblock_tile_offset.n() * DualMma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C0 = static_cast<ElementC *>(params.ref_C0.data()); ElementC *ptr_C1 = static_cast<ElementC *>(params.ref_C1.data()); ElementC *ptr_D0 = static_cast<ElementC *>(params.ref_D0.data()); ElementC *ptr_D1 = static_cast<ElementC *>(params.ref_D1.data()); ElementC *ptr_D2 = static_cast<ElementC *>(params.ref_D2.data()); // Construct the semaphore. 
Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == DualGemmMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op_0.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); output_op_1.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == DualGemmMode::kBatched) { ptr_C0 += threadblock_tile_offset.k() * params.batch_stride_C; ptr_C1 += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D0 += threadblock_tile_offset.k() * params.batch_stride_D; ptr_D1 += threadblock_tile_offset.k() * params.batch_stride_D; ptr_D2 += threadblock_tile_offset.k() * params.batch_stride_D; } // Tile iterator loading from source tensor. typename Epilogue0::OutputTileIterator iterator_C0( params.params_C0, ptr_C0, params.problem_size.mn(), thread_idx, threadblock_offset ); typename Epilogue1::OutputTileIterator iterator_C1( params.params_C1, ptr_C1, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue0::OutputTileIterator iterator_D0( params.params_D0, ptr_D0, params.problem_size.mn(), thread_idx, threadblock_offset ); typename Epilogue1::OutputTileIterator iterator_D1( params.params_D1, ptr_D1, params.problem_size.mn(), thread_idx, threadblock_offset ); typename Epilogue1::OutputTileIterator iterator_D2( params.params_D2, ptr_D2, params.problem_size.mn(), thread_idx, threadblock_offset ); DualEpilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C0 = iterator_D0; iterator_C1 = iterator_D1; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. typename Epilogue0::OutputTileIterator source_iters[] = { iterator_C0, iterator_C1 }; const bool writeToD2 = (!kSplitKSerial || params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1); epilogue( output_op_0, output_op_1, output_op_2, iterator_D0, iterator_D1, iterator_D2, accum0, accum1, source_iters, writeToD2 ); // // Release the semaphore // if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } __threadfence(); semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
cutlass/examples/45_dual_gemm/kernel/dual_gemm.h/0
{ "file_path": "cutlass/examples/45_dual_gemm/kernel/dual_gemm.h", "repo_id": "cutlass", "token_count": 7424 }
11
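For orientation, the DualGemm kernel above runs two mainloops that share the A operand (D0 = A @ B0 + C0 and D1 = A @ B1 + C1) and a third output operator that combines the two results elementwise into D2. A minimal NumPy sketch of that dataflow follows; the real OutputOp2 is a template parameter, so the elementwise product used here is only a stand-in.

import numpy as np

def dual_gemm_reference(A, B0, B1, C0, C1, combine=np.multiply):
    D0 = A @ B0 + C0        # Mma0 + Epilogue0
    D1 = A @ B1 + C1        # Mma1 + Epilogue1
    D2 = combine(D0, D1)    # OutputOp2 applied elementwise (stand-in: product)
    return D0, D1, D2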
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Hopper GEMM example with different data types using CUTLASS 3.0 APIs for NVIDIA Hopper architecture This example shows how to perform GEMM where the input tensors A and B have different element types. CUTLASS currently supports upcasting from a narrower (fewer bits) to a wider (more bits) type and utilizing the tensor core instruction for the wider type. For instance, when doing INT8 x FP16, CUTLASS will convert INT8 -> FP16 and do math using FP16 tensor cores. Similarly, for INT4 x INT8, it will upcast to INT8 and issue math using INT8 tensor cores. The narrower type always passes through the register file. Therefore, in cases where the narrower type is operand B, the collective will implicitly swap A and B in the main loop. However, implicit swaps do not support TMA epilogues. Consequently, it is essential to consider this when constructing the epilogue, as illustrated in this example. Note that in this example, we explicitly swap A and B in order to use TMA epilogues. We do this since TMA epilogues are more performant on problem sizes of interest. It is expected that the scale's K dimension be scale_k = ceil_div(problem_k, group_size). Scales are always expected to be MN major. This means the fastest changing dimension must be M if A is scaled or N if B is scaled. If A is being scaled, the scales should have shape [M, scale_k], while if B is scaled, it must have shape [N, scale_k]. The implementation only supports "group-wise" scales. However, we can make it work for per-column scales by setting the groups size equal to the gemm problem K. Limitations: 1) Only supported combinations are 16-bit x {8-bit, 4-bit, 2-bit} and {8-bit} x {4-bit, 2-bit}. 2) The narrow type must always be in K-major format. 
3) The scales and zeros must be MN major. That means if A is scaled, it must be column major, but if B is scaled it must be row major. 4) The scales and the zeros must have the same layout and groupsize. 5) When dealing with 8-bit x {4-bit, 2-bit}, both inputs must be in K-major format. 6) Currently, TMA epilogues cannot be used when the narrow type is the B operand. This limitation arises because the implementation always swaps the operands to ensure that the narrow type passes through the register file, and TMA epilogues do not currently support implicit swap + transpose operations. We plan to address this limitation in the future. However, we address this in the example by explicitly swapping and transposing the operands. Examples: Runs the mixed input batched gemm (with batch size 2), converting B to the type of A (mode 0) $ ./examples/55_hopper_mixed_dtype_gemm/55_hopper_mixed_dtype_gemm --m=2048 --n=2048 --k=2048 --l=2 --mode=0 Runs the mixed input gemm, and applies a scaling factor to B before mma (mode 1). Applies a vector of scales to the entire matrix (group size is the same as the gemm k dimension). $ ./examples/55_hopper_mixed_dtype_gemm/55_hopper_mixed_dtype_gemm --m=4096 --n=5120 --k=8192 --g=8192 --mode=1 Runs the mixed input gemm, and applies a scaling factor and adds a zero-point to B before mma (mode 2). Uses a group size of 128. $ ./examples/55_hopper_mixed_dtype_gemm/55_hopper_mixed_dtype_gemm --m=2048 --n=5120 --k=8192 --g=128 --mode=2 */ #include <iostream> #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/collective/default_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/packed_stride.hpp" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/gett.hpp" #include "helper.h" #include "unfused_weight_dequantize.hpp" using namespace cute; #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) // This is just an example, so we use a regular enum so we can compare directly to the command-line int. 
enum GemmMode { ConvertOnly, ScaleOnly, ScaleWithZeroPoint }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// GEMM kernel configurations ///////////////////////////////////////////////////////////////////////////////////////////////// using MmaType = cutlass::float_e4m3_t; using QuantType = cutlass::int4b_t; constexpr int TileShapeK = 128 * 8 / sizeof_bits<MmaType>::value; // A matrix configuration using ElementA = MmaType; // Element type for A matrix operand using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes) // B matrix configuration using ElementB = QuantType; // Element type for B matrix operand using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes) // This example manually swaps and transposes, so keep transpose of input layouts using LayoutA_Transpose = typename cutlass::layout::LayoutTranspose<LayoutA>::type; using LayoutB_Transpose = typename cutlass::layout::LayoutTranspose<LayoutB>::type; using ElementZero = cutlass::half_t; using ElementScale = cutlass::half_t; using LayoutScale = cutlass::layout::RowMajor; // C/D matrix configuration using ElementC = cutlass::half_t; // Element type for C and D matrix operands using LayoutC = cutlass::layout::RowMajor; // Layout type for C and D matrix operands constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrix in units of elements (up to 16 bytes) // D matrix configuration using ElementD = ElementC; using LayoutD = LayoutC; constexpr int AlignmentD = 128 / cutlass::sizeof_bits<ElementD>::value; // Core kernel configurations using ElementAccumulator = float; // Element type for internal accumulation using ElementCompute = float; // Element type for epilogue computation using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag using TileShape = Shape<_128,_256,cute::Int<TileShapeK>>; // Threadblock-level tile size using ClusterShape = Shape<_2,_1,_1>; // Shape of the threadblocks in a cluster using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput; // Kernel to launch based on the default setting in the Collective Builder using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape, ClusterShape, EpilogueTileType, ElementAccumulator, ElementAccumulator, // Transpose layout of D here since we use explicit swap + transpose // the void type for C tells the builder to allocate 0 smem for the C matrix. // We can enable this if beta == 0 by changing ElementC to void below. ElementC, typename cutlass::layout::LayoutTranspose<LayoutC>::type, AlignmentC, ElementD, typename cutlass::layout::LayoutTranspose<LayoutD>::type, AlignmentD, EpilogueSchedule // This is the only epi supporting the required swap + transpose. 
>::CollectiveOp; // ============================================================ MIXED INPUT NO SCALES ============================================================================ // The collective will infer that the narrow type should be upcasted to the wide type. // We swap A and B operands to the builder here using CollectiveMainloopConvertOnly = typename cutlass::gemm::collective::CollectiveBuilder< ArchTag, OperatorClass, ElementB, LayoutB_Transpose, AlignmentB, ElementA, LayoutA_Transpose, AlignmentA, ElementAccumulator, TileShape, ClusterShape, cutlass::gemm::collective::StageCountAutoCarveout< static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage)) >, KernelSchedule >::CollectiveOp; using GemmKernelConvertOnly = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, // Indicates ProblemShape CollectiveMainloopConvertOnly, CollectiveEpilogue >; using GemmConvertOnly = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelConvertOnly>; // =========================================================== MIXED INPUT WITH SCALES =========================================================================== // The Scale information must get paired with the operand that will be scaled. In this example, B is scaled so we make a tuple of B's information and the scale information. using CollectiveMainloopScaleOnly = typename cutlass::gemm::collective::CollectiveBuilder< ArchTag, OperatorClass, cute::tuple<ElementB, ElementScale>, LayoutB_Transpose, AlignmentB, ElementA, LayoutA_Transpose, AlignmentA, ElementAccumulator, TileShape, ClusterShape, cutlass::gemm::collective::StageCountAutoCarveout< static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage)) >, KernelSchedule >::CollectiveOp; using GemmKernelScaleOnly = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, // Indicates ProblemShape CollectiveMainloopScaleOnly, CollectiveEpilogue >; using GemmScaleOnly = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelScaleOnly>; // =========================================================== MIXED INPUT WITH SCALES AND ZEROS ================================================================== // We specify scale + zero elements to indicate that we require both. Scales and biases have the same format. 
using CollectiveMainloopScaleWithZeroPoint = typename cutlass::gemm::collective::CollectiveBuilder< ArchTag, OperatorClass, cute::tuple<ElementB, ElementScale, ElementZero>, LayoutB_Transpose, AlignmentB, ElementA, LayoutA_Transpose, AlignmentA, ElementAccumulator, TileShape, ClusterShape, cutlass::gemm::collective::StageCountAutoCarveout< static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage)) >, KernelSchedule >::CollectiveOp; using GemmKernelScaleWithZeroPoint = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, // Indicates ProblemShape CollectiveMainloopScaleWithZeroPoint, CollectiveEpilogue >; using GemmScaleWithZeroPoint = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelScaleWithZeroPoint>; // ================================================================================================================================================================= using StrideA = cutlass::detail::TagToStrideA_t<LayoutA>; using StrideB = cutlass::detail::TagToStrideB_t<LayoutB>; using StrideC = typename GemmKernelScaleWithZeroPoint::StrideC; using StrideD = typename GemmKernelScaleWithZeroPoint::StrideD; using StrideC_ref = cutlass::detail::TagToStrideC_t<LayoutC>; using StrideD_ref = cutlass::detail::TagToStrideC_t<LayoutD>; // // Data members // /// Initialization StrideA stride_A; StrideB stride_B; StrideC stride_C; StrideC_ref stride_C_ref; StrideD stride_D; StrideD_ref stride_D_ref; uint64_t seed; // Scale and Zero share a stride since the layout and shapes must be the same. using StrideS = typename CollectiveMainloopScaleWithZeroPoint::StrideScale; using StrideS_ref = cutlass::detail::TagToStrideB_t<LayoutScale>; StrideS stride_S; StrideS_ref stride_S_ref; cutlass::HostTensor<MmaType, LayoutA> tensor_A; cutlass::HostTensor<QuantType, LayoutB> tensor_B; cutlass::HostTensor<MmaType, LayoutB> tensor_B_dq; cutlass::HostTensor<ElementScale, LayoutScale> tensor_scale; cutlass::HostTensor<ElementZero, LayoutScale> tensor_zero; cutlass::HostTensor<ElementC, LayoutC> tensor_C; cutlass::HostTensor<typename GemmScaleWithZeroPoint::EpilogueOutputOp::ElementOutput, LayoutD> tensor_D; cutlass::HostTensor<typename GemmScaleWithZeroPoint::EpilogueOutputOp::ElementOutput, LayoutD> tensor_ref_D; #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) ///////////////////////////////////////////////////////////////////////////////////////////////// /// Testbed utility types ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help = false; float alpha = 1.0f; float beta = 0.0f; int iterations = 1000; int mode = 2; int m = 5120, n = 4096, k = 4096; int g = 128; int l = 1; // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("m", m); cmd.get_cmd_line_argument("n", n); cmd.get_cmd_line_argument("k", k); cmd.get_cmd_line_argument("l", l); cmd.get_cmd_line_argument("g", g); cmd.get_cmd_line_argument("mode", mode); cmd.get_cmd_line_argument("alpha", alpha, 1.f); cmd.get_cmd_line_argument("beta", beta, 0.f); cmd.get_cmd_line_argument("iterations", iterations); } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const {

    out << "55_hopper_mixed_dtype_gemm\n\n"
        << "  Hopper mixed data type GEMM using a Warp Specialized kernel.\n\n"
        << "Options:\n\n"
        << "  --help                      If specified, displays this usage statement\n\n"
        << "  --m=<int>                   Sets the M extent of the GEMM\n"
        << "  --n=<int>                   Sets the N extent of the GEMM\n"
        << "  --k=<int>                   Sets the K extent of the GEMM\n"
        << "  --l=<int>                   The number of independent gemm problems with mnk shape\n"
        << "  --g=<int>                   The size of each group for the scales and zeros. To broadcast a vector of scales or zeros, set the group size to K.\n"
        << "  --mode=<int>                The mode to run the gemm. 0 does (A @ B), 1 means A @ (scale * B), 2 means A @ (scale * B + zero-point).\n"
        << "  --alpha=<f32>               Epilogue scalar alpha\n"
        << "  --beta=<f32>                Epilogue scalar beta\n\n"
        << "  --iterations=<int>          Number of profiling iterations to perform.\n\n";

    out << "\n\nExamples:\n\n"
        << "$ " << "55_hopper_mixed_dtype_gemm" << " --m=1024 --n=512 --k=1024 --g=1024 --l=10 --alpha=2 --mode=2 --beta=0.707 \n\n";

    return out;
  }

  /// Compute performance in GFLOP/s
  double gflops(double runtime_s) const {
    // Two flops per multiply-add
    uint64_t flop = uint64_t(2) * m * n * k * l;
    double gflop = double(flop) / double(1.0e9);
    return gflop / runtime_s;
  }
};

/// Result structure
struct Result {
  double avg_runtime_ms = 0.0;
  double gflops = 0.0;
  cutlass::Status status = cutlass::Status::kSuccess;
  cudaError_t error = cudaSuccess;
  bool passed = false;
};

#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)

/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM setup and evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Helper to initialize a block of device data
template <class Element, class Layout>
bool initialize_tensor(
  cutlass::TensorView<Element, Layout> view,
  uint64_t seed=2023) {

  double scope_max, scope_min;
  int bits_input = cutlass::sizeof_bits<Element>::value;

  if (bits_input == 1) {
    scope_max = 2;
    scope_min = 0;
  }
  else if (bits_input <= 8) {
    scope_max = 2;
    scope_min = -2;
  }
  else if (bits_input == 16) {
    scope_max = 5;
    scope_min = -5;
  }
  else {
    scope_max = 8;
    scope_min = -8;
  }
  cutlass::reference::host::TensorFillRandomUniform(
    view, seed, scope_max, scope_min);

  return true;
}

template <typename Element, typename Layout>
bool initialize_quant_tensor(
  cutlass::TensorView<Element, Layout> view,
  uint64_t seed=2023) {

  float scope_min = float(cutlass::platform::numeric_limits<Element>::lowest());
  float scope_max = float(cutlass::platform::numeric_limits<Element>::max());

  cutlass::reference::host::TensorFillRandomUniform(
    view, seed, scope_max, scope_min);

  return true;
}

template <class Element, class Layout>
bool initialize_scale(
  cutlass::TensorView<Element, Layout> view,
  const Options &options) {

  if (options.mode == GemmMode::ConvertOnly) {
    // No scales, so just initialize with 1 so we can use the same kernel to dequantize the data.
cutlass::reference::host::TensorFill(view, Element(1.0f));
  }
  else {
    float elt_max_f = float(cutlass::platform::numeric_limits<QuantType>::max());
    const float max_dequant_val = 4.f;
    const float min_dequant_val = 0.5f;

    float scope_max(max_dequant_val / elt_max_f);
    float scope_min(min_dequant_val / elt_max_f);

    cutlass::reference::host::TensorFillRandomUniform(
      view, seed, scope_max, scope_min);
  }
  return true;
}

template <class Element, class Layout>
bool initialize_zero(
  cutlass::TensorView<Element, Layout> view,
  const Options &options) {

  if (options.mode == GemmMode::ScaleWithZeroPoint) {
    cutlass::reference::host::TensorFillRandomUniform(
      view, seed, 2.0f, -2.0f);
  } else {
    // No zero-point, so just initialize with 0 so we can use the same kernel to dequantize the data.
    cutlass::reference::host::TensorFill(view, Element(0.0f));
  }
  return true;
}

/// Initialize operands to be used in the GEMM and reference GEMM
void initialize(const Options &options) {

  auto shape_b = cute::make_shape(options.n, options.k, options.l);
  const int scale_k = (options.k + options.g - 1) / options.g;

  stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(options.m, options.k, options.l));
  stride_B = cutlass::make_cute_packed_stride(StrideB{}, shape_b);
  // Reverse stride here due to swap and transpose
  stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(options.n, options.m, options.l));
  stride_C_ref = cutlass::make_cute_packed_stride(StrideC_ref{}, cute::make_shape(options.m, options.n, options.l));
  // Reverse stride here due to swap and transpose
  stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(options.n, options.m, options.l));
  stride_D_ref = cutlass::make_cute_packed_stride(StrideD_ref{}, cute::make_shape(options.m, options.n, options.l));

  auto a_coord = cutlass::make_Coord(options.m * options.l, options.k);
  auto b_coord = cutlass::make_Coord(options.k, options.n * options.l);
  auto c_coord = cutlass::make_Coord(options.m * options.l, options.n);

  tensor_A.resize(a_coord);
  tensor_B.resize(b_coord);
  tensor_B_dq.resize(b_coord);
  tensor_C.resize(c_coord);
  tensor_D.resize(c_coord);
  tensor_ref_D.resize(c_coord);

  tensor_scale.resize({scale_k * options.l, options.n});
  tensor_zero.resize({scale_k * options.l, options.n});

  initialize_tensor(tensor_A.host_view(), seed + 2022);
  initialize_quant_tensor(tensor_B.host_view(), seed + 2021);
  initialize_tensor(tensor_C.host_view(), seed + 2020);
  initialize_scale(tensor_scale.host_view(), options);
  initialize_zero(tensor_zero.host_view(), options);

  tensor_A.sync_device();
  tensor_B.sync_device();
  tensor_C.sync_device();
  tensor_scale.sync_device();
  tensor_zero.sync_device();

  auto layout_B = make_layout(shape_b, stride_B);

  auto shape_scale_zero = cute::make_shape(options.n, scale_k, options.l);
  stride_S = cutlass::make_cute_packed_stride(StrideS{}, cute::make_shape(options.n, scale_k, options.l));
  stride_S_ref = cutlass::make_cute_packed_stride(StrideS_ref{}, cute::make_shape(options.n, scale_k, options.l));
  auto layout_scale_zero = make_layout(shape_scale_zero, stride_S_ref);

  dequantize_weight(tensor_B_dq.device_data(), tensor_B.device_data(), layout_B, tensor_scale.device_data(), tensor_zero.device_data(), layout_scale_zero, options.g);
  tensor_B_dq.sync_host();
}

/// Populates a Gemm::Arguments structure from the given commandline options
template <typename Args>
Args args_from_options(const Options &options)
{
  // Swap the A and B tensors, as well as problem shapes here.
if (options.mode == GemmMode::ConvertOnly) { return Args { cutlass::gemm::GemmUniversalMode::kGemm, {options.n, options.m, options.k, options.l}, {tensor_B.device_data(), stride_B, tensor_A.device_data(), stride_A}, {{options.alpha, options.beta}, tensor_C.device_data(), stride_C, tensor_D.device_data(), stride_D} }; } else if (options.mode == GemmMode::ScaleOnly) { return Args { cutlass::gemm::GemmUniversalMode::kGemm, {options.n, options.m, options.k, options.l}, {tensor_B.device_data(), stride_B, tensor_A.device_data(), stride_A, tensor_scale.device_data(), stride_S, options.g}, {{options.alpha, options.beta}, tensor_C.device_data(), stride_C, tensor_D.device_data(), stride_D} }; } else if (options.mode == GemmMode::ScaleWithZeroPoint) { return Args { cutlass::gemm::GemmUniversalMode::kGemm, {options.n, options.m, options.k, options.l}, {tensor_B.device_data(), stride_B, tensor_A.device_data(), stride_A, tensor_scale.device_data(), stride_S, options.g, tensor_zero.device_data()}, {{options.alpha, options.beta}, tensor_C.device_data(), stride_C, tensor_D.device_data(), stride_D} }; } else { std::cerr << "Invalid mode " << options.mode << ". Must be 0, 1 or 2." << std::endl; exit(-1); } } bool verify(const Options &options) { // // Compute reference output // // In this example, we use the GPU default kernels as a reference (unfused scale) // This is to avoid numerical differences from different accumulation order. // Again, due to numerical differences, we must use fast acc here when the mma type is // FP8 as the fused implementation only supports fast acc at the moment. constexpr bool IsFP8Input = cute::is_same_v<MmaType, cutlass::float_e4m3_t> || cute::is_same_v<MmaType, cutlass::float_e5m2_t>; using FP8Sched = cute::conditional_t<size<0>(TileShape{}) == 64, cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum, cutlass::gemm::KernelTmaWarpSpecializedCooperativeFP8FastAccum>; using ScheduleRef = cute::conditional_t<IsFP8Input, FP8Sched, cutlass::gemm::collective::KernelScheduleAuto>; using CollectiveMainloopRef = typename cutlass::gemm::collective::CollectiveBuilder< ArchTag, OperatorClass, MmaType, LayoutA, AlignmentA, MmaType, LayoutB, AlignmentB, ElementAccumulator, TileShape, ClusterShape, cutlass::gemm::collective::StageCountAuto, ScheduleRef >::CollectiveOp; using CollectiveEpilogueRef = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape, ClusterShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementAccumulator, ElementAccumulator, ElementC, LayoutC, AlignmentC, ElementD, LayoutD, AlignmentD, cutlass::epilogue::NoSmemWarpSpecialized >::CollectiveOp; using GemmKernelRef = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, // Indicates ProblemShape CollectiveMainloopRef, CollectiveEpilogueRef >; using GemmRef = cutlass::gemm::device::GemmUniversalAdapter<GemmKernelRef>; typename GemmRef::Arguments arguments{ cutlass::gemm::GemmUniversalMode::kGemm, {options.m, options.n, options.k, options.l}, {tensor_A.device_data(), stride_A, tensor_B_dq.device_data(), stride_B}, {{options.alpha, options.beta}, tensor_C.device_data(), stride_C_ref, tensor_ref_D.device_data(), stride_D_ref} }; // Run the gemm where the scaling is performed outside of the kernel. 
GemmRef gemm_ref; size_t workspace_size = GemmRef::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); CUTLASS_CHECK(gemm_ref.can_implement(arguments)); CUTLASS_CHECK(gemm_ref.initialize(arguments, workspace.get())); CUTLASS_CHECK(gemm_ref.run()); // compare_reference tensor_D.sync_host(); tensor_ref_D.sync_host(); const ElementD epsilon(1e-2f); const ElementD non_zero_floor(1e-4f); bool passed = cutlass::reference::host::TensorRelativelyEquals(tensor_ref_D.host_view(), tensor_D.host_view(), epsilon, non_zero_floor); return passed; } /// Execute a given example GEMM computation template <typename Gemm> int run(Options &options) { initialize(options); // Instantiate CUTLASS kernel depending on templates Gemm gemm; // Create a structure of gemm kernel arguments suitable for invoking an instance of Gemm auto arguments = args_from_options<typename Gemm::Arguments>(options); // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check if the problem size is supported or not CUTLASS_CHECK(gemm.can_implement(arguments)); // Initialize CUTLASS kernel with arguments and workspace pointer CUTLASS_CHECK(gemm.initialize(arguments, workspace.get())); // Correctness / Warmup iteration CUTLASS_CHECK(gemm.run()); // Check if output from CUTLASS kernel and reference kernel are equal or not Result result; result.passed = verify(options); std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << std::endl; if (!result.passed) { exit(-1); } // Run profiling loop if (options.iterations > 0) { GpuTimer timer; timer.start(); for (int iter = 0; iter < options.iterations; ++iter) { CUTLASS_CHECK(gemm.run()); } timer.stop(); // Compute average runtime and GFLOPs. float elapsed_ms = timer.elapsed_millis(); result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations); result.gflops = options.gflops(result.avg_runtime_ms / 1000.0); std::cout << " Problem Size: " << options.m << 'x' << options.n << 'x' << options.k << 'x' << options.l << std::endl; std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl; std::cout << " GFLOPS: " << result.gflops << std::endl; } return 0; } #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // CUTLASS must be compiled with CUDA 12.0 Toolkit to run this example // and must have compute capability at least 90. if (__CUDACC_VER_MAJOR__ < 12) { std::cerr << "This example requires CUDA 12 or newer.\n"; // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0;
  }

  cudaDeviceProp props;
  int current_device_id;
  CUDA_CHECK(cudaGetDevice(&current_device_id));
  CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));

  if (props.major < 9) {
    std::cerr
      << "This example requires a GPU of NVIDIA's Hopper Architecture or "
      << "later (compute capability 90 or greater).\n";
    return 0;
  }

  //
  // Parse options
  //

  Options options;

  options.parse(argc, args);

  if (options.help) {
    options.print_usage(std::cout) << std::endl;
    return 0;
  }

  //
  // Evaluate CUTLASS kernels
  //

#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
  if (options.mode == GemmMode::ConvertOnly) {
    std::cout << "Running in no scale mode." << std::endl;
    run<GemmConvertOnly>(options);
  }
  else if (options.mode == GemmMode::ScaleOnly) {
    if (options.g == options.k) {
      std::cout << "Running in per-column scale mode." << std::endl;
    } else {
      std::cout << "Running in group scale mode." << std::endl;
    }
    run<GemmScaleOnly>(options);
  }
  else if (options.mode == GemmMode::ScaleWithZeroPoint) {
    if (options.g == options.k) {
      std::cout << "Running in per-column scale and zero mode." << std::endl;
    } else {
      std::cout << "Running in group scale and zero mode." << std::endl;
    }
    run<GemmScaleWithZeroPoint>(options);
  }
#endif

  return 0;
}

/////////////////////////////////////////////////////////////////////////////////////////////////
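// ------------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the example above): a host-side, group-wise dequantization of
// the narrow B operand, following the formula stated in the usage text (mode 2: scale * B + zero).
// The example itself dequantizes on the device via dequantize_weight(); this standalone routine,
// including the QuantT/ScaleT names and the assumed per-column-K indexing of B and of the
// per-(n, group) scale/zero arrays, is only a sketch of what that step computes.
// ------------------------------------------------------------------------------------------------
template <class QuantT, class ScaleT>
void dequantize_B_reference_host(
    ScaleT*       b_dq,       // output: dequantized B with the same logical N x K extent as b_q
    QuantT const* b_q,        // input: quantized B, assumed indexed as b_q[n * K + k]
    ScaleT const* scale,      // per-(n, k-group) scales, assumed indexed as scale[n * scale_k + g]
    ScaleT const* zero,       // per-(n, k-group) zero-points, same indexing as scale
    int N, int K, int group_size) {

  int const scale_k = (K + group_size - 1) / group_size;   // number of groups along K, as in initialize()

  for (int n = 0; n < N; ++n) {
    for (int k = 0; k < K; ++k) {
      int const g = k / group_size;
      ScaleT const s = scale[n * scale_k + g];
      ScaleT const z = zero[n * scale_k + g];
      // mode 0: s == 1 and z == 0, mode 1: real scale with z == 0, mode 2: both are real
      b_dq[n * K + k] = s * static_cast<ScaleT>(b_q[n * K + k]) + z;
    }
  }
}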
cutlass/examples/55_hopper_mixed_dtype_gemm/55_hopper_mixed_dtype_gemm.cu/0
{ "file_path": "cutlass/examples/55_hopper_mixed_dtype_gemm/55_hopper_mixed_dtype_gemm.cu", "repo_id": "cutlass", "token_count": 11002 }
12
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/copy.hpp> // Config #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)) # define CUTE_ARCH_CP_ASYNC_SM80_ENABLED #endif namespace cute { /// Copy via cp.async with caching at all levels template <class TS, class TD = TS> struct SM80_CP_ASYNC_CACHEALWAYS { using SRegisters = TS[1]; using DRegisters = TD[1]; static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)"); static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported"); CUTE_HOST_DEVICE static void copy(TS const& gmem_src, TD & smem_dst) { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) TS const* gmem_ptr = &gmem_src; uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst); asm volatile("cp.async.ca.shared.global.L2::128B [%0], [%1], %2;\n" :: "r"(smem_int_ptr), "l"(gmem_ptr), "n"(sizeof(TS))); #else CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled"); #endif } }; /// Copy via cp.async with caching at global level template <class TS, class TD = TS> struct SM80_CP_ASYNC_CACHEGLOBAL { using SRegisters = TS[1]; using DRegisters = TD[1]; static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)"); static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported"); CUTE_HOST_DEVICE static void copy(TS const& gmem_src, TD & smem_dst) { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) TS const* gmem_ptr = &gmem_src; uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst); asm volatile("cp.async.cg.shared.global.L2::128B [%0], [%1], %2;\n" :: "r"(smem_int_ptr), "l"(gmem_ptr), "n"(sizeof(TS))); #else 
CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled"); #endif } }; /// Copy via cp.async with caching at all levels template <class TS, class TD = TS> struct SM80_CP_ASYNC_CACHEALWAYS_ZFILL { using SRegisters = TS[1]; using DRegisters = TD[1]; static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)"); static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported"); CUTE_HOST_DEVICE static void copy(TS const& gmem_src, TD & smem_dst, bool pred) { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) TS const* gmem_ptr = &gmem_src; uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst); int src_size = pred ? sizeof(TS) : 0; asm volatile("cp.async.ca.shared.global.L2::128B [%0], [%1], %2, %3;\n" :: "r"(smem_int_ptr), "l"(gmem_ptr), "n"(sizeof(TS)), "r"(src_size)); #else CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled"); #endif } }; /// Copy via cp.async with caching at global level template <class TS, class TD = TS> struct SM80_CP_ASYNC_CACHEGLOBAL_ZFILL { using SRegisters = TS[1]; using DRegisters = TD[1]; static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)"); static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported"); CUTE_HOST_DEVICE static void copy(TS const& gmem_src, TD & smem_dst, bool pred) { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) TS const* gmem_ptr = &gmem_src; uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst); int src_size = pred ? sizeof(TS) : 0; asm volatile("cp.async.cg.shared.global.L2::128B [%0], [%1], %2, %3;\n" :: "r"(smem_int_ptr), "l"(gmem_ptr), "n"(sizeof(TS)), "r"(src_size)); #else CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled"); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// Establishes an ordering w.r.t previously issued cp.async instructions. Does not block. CUTE_HOST_DEVICE void cp_async_fence() { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) asm volatile("cp.async.commit_group;\n" ::); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Blocks until all but N previous cp.async.commit_group operations have committed. template <int N> CUTE_HOST_DEVICE void cp_async_wait() { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) if constexpr (N == 0) { asm volatile("cp.async.wait_all;\n" ::); } else { asm volatile("cp.async.wait_group %0;\n" :: "n"(N)); } #endif } template <int N> CUTE_HOST_DEVICE void cp_async_wait(Int<N>) { return cp_async_wait<N>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // end namespace cute
cutlass/include/cute/arch/copy_sm80.hpp/0
{ "file_path": "cutlass/include/cute/arch/copy_sm80.hpp", "repo_id": "cutlass", "token_count": 2592 }
13
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/numeric/integral_constant.hpp> #include <cute/util/type_traits.hpp> namespace cute { template <class T, size_t N> struct array { using element_type = T; using value_type = remove_cv_t<T>; using size_type = size_t; using difference_type = ptrdiff_t; using reference = element_type&; using const_reference = const element_type&; using pointer = element_type*; using const_pointer = const element_type*; using iterator = pointer; using const_iterator = const_pointer; CUTE_HOST_DEVICE constexpr reference operator[](size_type pos) { return begin()[pos]; } CUTE_HOST_DEVICE constexpr const_reference operator[](size_type pos) const { return begin()[pos]; } CUTE_HOST_DEVICE constexpr reference front() { return *begin(); } CUTE_HOST_DEVICE constexpr const_reference front() const { return *begin(); } CUTE_HOST_DEVICE constexpr reference back() { // return *rbegin(); return operator[](N-1); } CUTE_HOST_DEVICE constexpr const_reference back() const { // return *rbegin(); return operator[](N-1); } CUTE_HOST_DEVICE constexpr T* data() { return __elems_; } CUTE_HOST_DEVICE constexpr T const* data() const { return __elems_; } CUTE_HOST_DEVICE constexpr iterator begin() { return data(); } CUTE_HOST_DEVICE constexpr const_iterator begin() const { return data(); } CUTE_HOST_DEVICE constexpr const_iterator cbegin() { return begin(); } CUTE_HOST_DEVICE constexpr const_iterator cbegin() const { return begin(); } CUTE_HOST_DEVICE constexpr iterator end() { return data() + size(); } CUTE_HOST_DEVICE constexpr const_iterator end() const { return data() + size(); } CUTE_HOST_DEVICE constexpr const_iterator cend() { return end(); } CUTE_HOST_DEVICE constexpr const_iterator cend() const { return end(); } 
CUTE_HOST_DEVICE constexpr bool empty() const { return size() == 0; } CUTE_HOST_DEVICE constexpr size_type size() const { return N; } CUTE_HOST_DEVICE constexpr size_type max_size() const { return size(); } CUTE_HOST_DEVICE constexpr void fill(const T& value) { for (auto& e : *this) { e = value; } } CUTE_HOST_DEVICE constexpr void clear() { fill(T(0)); } CUTE_HOST_DEVICE constexpr void swap(array& other) { using CUTE_STL_NAMESPACE::swap; for (size_type i = 0; i < size(); ++i) { swap((*this)[i], other[i]); } } element_type __elems_[N]; }; template <class T> struct array<T, 0> { using element_type = T; using value_type = remove_cv_t<T>; using size_type = size_t; using difference_type = ptrdiff_t; using reference = element_type&; using const_reference = const element_type&; using pointer = element_type*; using const_pointer = const element_type*; using const_iterator = const_pointer; using iterator = pointer; CUTE_HOST_DEVICE constexpr reference operator[](size_type pos) { return begin()[pos]; } CUTE_HOST_DEVICE constexpr const_reference operator[](size_type pos) const { return begin()[pos]; } CUTE_HOST_DEVICE constexpr reference front() { return *begin(); } CUTE_HOST_DEVICE constexpr const_reference front() const { return *begin(); } CUTE_HOST_DEVICE constexpr reference back() { return *begin(); } CUTE_HOST_DEVICE constexpr const_reference back() const { return *begin(); } CUTE_HOST_DEVICE constexpr T* data() { return nullptr; } CUTE_HOST_DEVICE constexpr T const* data() const { return nullptr; } CUTE_HOST_DEVICE constexpr iterator begin() { return nullptr; } CUTE_HOST_DEVICE constexpr const_iterator begin() const { return nullptr; } CUTE_HOST_DEVICE constexpr const_iterator cbegin() { return nullptr; } CUTE_HOST_DEVICE constexpr const_iterator cbegin() const { return nullptr; } CUTE_HOST_DEVICE constexpr iterator end() { return nullptr; } CUTE_HOST_DEVICE constexpr const_iterator end() const { return nullptr; } CUTE_HOST_DEVICE constexpr const_iterator cend() { return nullptr; } CUTE_HOST_DEVICE constexpr const_iterator cend() const { return nullptr; } CUTE_HOST_DEVICE constexpr bool empty() const { return true; } CUTE_HOST_DEVICE constexpr size_type size() const { return 0; } CUTE_HOST_DEVICE constexpr size_type max_size() const { return 0; } CUTE_HOST_DEVICE constexpr void fill(const T& value) {} CUTE_HOST_DEVICE constexpr void clear() {} CUTE_HOST_DEVICE constexpr void swap(array& other) {} }; template <class T, size_t N> CUTE_HOST_DEVICE constexpr bool operator==(array<T,N> const& lhs, array<T,N> const& rhs) { for (size_t i = 0; i < N; ++i) { if (lhs[i] != rhs[i]) { return false; } } return true; } template <class T, size_t N> CUTE_HOST_DEVICE constexpr void clear(array<T,N>& a) { a.fill(T(0)); } template <class T, size_t N> CUTE_HOST_DEVICE constexpr void fill(array<T,N>& a, T const& value) { a.fill(value); } template <class T, size_t N> CUTE_HOST_DEVICE constexpr void swap(array<T,N>& a, array<T,N>& b) { a.swap(b); } /// @return A cute::array of the elements of @c t in reverse order. 
template <class T, size_t N> CUTE_HOST_DEVICE constexpr cute::array<T,N> reverse(cute::array<T,N> const& t) { if constexpr (N == 0u) { return t; } else { cute::array<T,N> t_r{}; for (size_t k = 0; k < N; ++k) { t_r[k] = t[N - k - 1]; } return t_r; } } } // end cute // // Specialize tuple-related functionality for cute::array // #if defined(__CUDACC_RTC__) #include <cuda/std/tuple> #else #include <tuple> #endif namespace cute { template <size_t I, class T, size_t N> CUTE_HOST_DEVICE constexpr T& get(array<T,N>& a) { static_assert(I < N, "Index out of range"); return a[I]; } template <size_t I, class T, size_t N> CUTE_HOST_DEVICE constexpr T const& get(array<T,N> const& a) { static_assert(I < N, "Index out of range"); return a[I]; } template <size_t I, class T, size_t N> CUTE_HOST_DEVICE constexpr T&& get(array<T,N>&& a) { static_assert(I < N, "Index out of range"); return cute::move(a[I]); } } // end namespace cute namespace CUTE_STL_NAMESPACE { template <class T, size_t N> struct tuple_size<cute::array<T,N>> : CUTE_STL_NAMESPACE::integral_constant<size_t, N> {}; template <size_t I, class T, size_t N> struct tuple_element<I, cute::array<T,N>> { using type = T; }; template <class T, size_t N> struct tuple_size<cute::array<T,N> const> : CUTE_STL_NAMESPACE::integral_constant<size_t, N> {}; template <size_t I, class T, size_t N> struct tuple_element<I, cute::array<T,N> const> { using type = T; }; } // end namespace CUTE_STL_NAMESPACE #ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD namespace std { #if defined(__CUDACC_RTC__) template <class... _Tp> struct tuple_size; template <size_t _Ip, class... _Tp> struct tuple_element; #endif template <class T, size_t N> struct tuple_size<cute::array<T,N>> : CUTE_STL_NAMESPACE::integral_constant<size_t, N> {}; template <size_t I, class T, size_t N> struct tuple_element<I, cute::array<T,N>> { using type = T; }; template <class T, size_t N> struct tuple_size<cute::array<T,N> const> : CUTE_STL_NAMESPACE::integral_constant<size_t, N> {}; template <size_t I, class T, size_t N> struct tuple_element<I, cute::array<T,N> const> { using type = T; }; } // end namespace std #endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
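// ------------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of this header): a small compile-time exercise of the
// container above -- aggregate initialization, fill(), tuple-style get<>, and reverse().
// ------------------------------------------------------------------------------------------------
constexpr int cute_array_demo()
{
  cute::array<int, 4> a{};                   // aggregate; value-initialized to {0, 0, 0, 0}
  a.fill(2);                                 // {2, 2, 2, 2}
  cute::get<0>(a) = 7;                       // tuple-style element access -> {7, 2, 2, 2}
  cute::array<int, 4> r = cute::reverse(a);  // {2, 2, 2, 7}
  return a.front() + r.back();               // 7 + 7
}
static_assert(cute_array_demo() == 14, "cute::array demo");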
cutlass/include/cute/container/array.hpp/0
{ "file_path": "cutlass/include/cute/container/array.hpp", "repo_id": "cutlass", "token_count": 3658 }
14
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/util/type_traits.hpp> namespace cute { // // Common Operations // template <class T, class U, __CUTE_REQUIRES(is_arithmetic<T>::value && is_arithmetic<U>::value)> CUTE_HOST_DEVICE constexpr auto max(T const& t, U const& u) { return t < u ? u : t; } template <class T, class U, __CUTE_REQUIRES(is_arithmetic<T>::value && is_arithmetic<U>::value)> CUTE_HOST_DEVICE constexpr auto min(T const& t, U const& u) { return t < u ? t : u; } template <class T, __CUTE_REQUIRES(is_arithmetic<T>::value)> CUTE_HOST_DEVICE constexpr auto abs(T const& t) { if constexpr (is_signed<T>::value) { return t < T(0) ? -t : t; } else { return t; } CUTE_GCC_UNREACHABLE; } // Returns 1 if x > 0, -1 if x < 0, and 0 if x is zero. 
template <class T, __CUTE_REQUIRES(is_arithmetic<T>::value)> CUTE_HOST_DEVICE constexpr int signum(T const& x) { if constexpr (is_signed<T>::value) { return (T(0) < x) - (x < T(0)); } else { return T(0) < x; } CUTE_GCC_UNREACHABLE; } // // C++17 <numeric> operations // // Greatest common divisor of two positive integers template <class T, class U, __CUTE_REQUIRES(is_std_integral<T>::value && is_std_integral<U>::value)> CUTE_HOST_DEVICE constexpr cute::common_type_t<T, U> gcd(T t, U u) { while (true) { if (t == 0) { return u; } u %= t; if (u == 0) { return t; } t %= u; } } // Least common multiple of two positive integers template <class T, class U, __CUTE_REQUIRES(is_std_integral<T>::value && is_std_integral<U>::value)> CUTE_HOST_DEVICE constexpr cute::common_type_t<T, U> lcm(T const& t, U const& u) { return (t / gcd(t,u)) * u; } // // C++20 <bit> operations // // Checks if a number is an integral power of two template <class T> CUTE_HOST_DEVICE constexpr bool has_single_bit(T x) { return x != 0 && (x & (x - 1)) == 0; } // Smallest number of bits needed to represent the given value // For x == 0, this is 0 // For x != 0, this is 1 + floor(log2(x)) // bit_width( 0b0000 ) = 0 // bit_width( 0b0001 ) = 1 // bit_width( 0b0010 ) = 2 // bit_width( 0b0011 ) = 2 // bit_width( 0b0100 ) = 3 // bit_width( 0b0101 ) = 3 // bit_width( 0b0110 ) = 3 // bit_width( 0b0111 ) = 3 template <class T> CUTE_HOST_DEVICE constexpr T bit_width(T x) { static_assert(is_unsigned<T>::value, "Only to be used for unsigned types."); constexpr int N = (numeric_limits<T>::digits == 64 ? 6 : (numeric_limits<T>::digits == 32 ? 5 : (numeric_limits<T>::digits == 16 ? 4 : (numeric_limits<T>::digits == 8 ? 3 : (assert(false),0))))); T r = 0; for (int i = N - 1; i >= 0; --i) { T shift = (x > ((T(1) << (T(1) << i))-1)) << i; x >>= shift; r |= shift; } return r + (x != 0); } // Smallest integral power of two not less than the given value // bit_ceil( 0b00000000 ) = 0b00000001 // bit_ceil( 0b00000001 ) = 0b00000001 // bit_ceil( 0b00000010 ) = 0b00000010 // bit_ceil( 0b00000011 ) = 0b00000100 // bit_ceil( 0b00000100 ) = 0b00000100 // bit_ceil( 0b00000101 ) = 0b00001000 // bit_ceil( 0b00000110 ) = 0b00001000 // bit_ceil( 0b00000111 ) = 0b00001000 // bit_ceil( 0b00001000 ) = 0b00001000 // bit_ceil( 0b00001001 ) = 0b00010000 template <class T> CUTE_HOST_DEVICE constexpr T bit_ceil(T x) { return x == 0 ? T(1) : (T(1) << bit_width(x - 1)); } // Largest integral power of two not greater than the given value // bit_floor( 0b00000000 ) = 0b00000000 // bit_floor( 0b00000001 ) = 0b00000001 // bit_floor( 0b00000010 ) = 0b00000010 // bit_floor( 0b00000011 ) = 0b00000010 // bit_floor( 0b00000100 ) = 0b00000100 // bit_floor( 0b00000101 ) = 0b00000100 // bit_floor( 0b00000110 ) = 0b00000100 // bit_floor( 0b00000111 ) = 0b00000100 // bit_floor( 0b00001000 ) = 0b00001000 // bit_floor( 0b00001001 ) = 0b00001000 template <class T> CUTE_HOST_DEVICE constexpr T bit_floor(T x) { return x == 0 ? 0 : (T(1) << (bit_width(x) - 1)); } template <class T> CUTE_HOST_DEVICE constexpr T rotl(T x, int s); template <class T> CUTE_HOST_DEVICE constexpr T rotr(T x, int s); // Computes the result of circular bitwise left-rotation template <class T> CUTE_HOST_DEVICE constexpr T rotl(T x, int s) { constexpr int N = numeric_limits<T>::digits; return static_cast<T>(s == 0 ? x : s > 0 ? 
(x << s) | (x >> (N - s)) : rotr(x, -s)); } // Computes the result of circular bitwise right-rotation template <class T> CUTE_HOST_DEVICE constexpr T rotr(T x, int s) { constexpr int N = numeric_limits<T>::digits; return static_cast<T>(s == 0 ? x : s > 0 ? (x >> s) | (x << (N - s)) : rotl(x, -s)); } // Counts the number of consecutive 0 bits, starting from the most significant bit // countl_zero( 0b00000000 ) = 8 // countl_zero( 0b11111111 ) = 0 // countl_zero( 0b00011100 ) = 3 template <class T> CUTE_HOST_DEVICE constexpr T countl_zero(T x) { return numeric_limits<T>::digits - bit_width(x); } // Counts the number of consecutive 1 bits, starting from the most significant bit // countl_one( 0b00000000 ) = 0 // countl_one( 0b11111111 ) = 8 // countl_one( 0b11100011 ) = 3 template <class T> CUTE_HOST_DEVICE constexpr T countl_one(T x) { return countl_zero(~x); } // Counts the number of consecutive 0 bits, starting from the least significant bit // countr_zero( 0b00000000 ) = 8 // countr_zero( 0b11111111 ) = 0 // countr_zero( 0b00011100 ) = 2 template <class T> CUTE_HOST_DEVICE constexpr T countr_zero(T x) { return x == 0 ? numeric_limits<T>::digits : bit_width(T(x & T(-x))) - 1; // bit_width of the LSB } // Counts the number of consecutive 1 bits, starting from the least significant bit // countr_one( 0b00000000 ) = 0 // countr_one( 0b11111111 ) = 8 // countr_one( 0b11100011 ) = 2 template <class T> CUTE_HOST_DEVICE constexpr T countr_one(T x) { return countr_zero(~x); } // Counts the number of 1 bits in an unsigned integer // popcount( 0b00000000 ) = 0 // popcount( 0b11111111 ) = 8 // popcount( 0b00011101 ) = 4 template <class T> CUTE_HOST_DEVICE constexpr int popcount(T x) { int c = 0; while (x) { ++c; x &= x - 1; // clear the least significant bit set } return c; } // // Custom operations // // Computes the result of bitwise left-shift template <class T> CUTE_HOST_DEVICE constexpr T shiftl(T x, int s) { return s >= 0 ? (x << s) : (x >> -s); } // Computes the result of bitwise right-shift template <class T> CUTE_HOST_DEVICE constexpr T shiftr(T x, int s) { return s >= 0 ? (x >> s) : (x << -s); } // Safe divide // @pre t % u == 0 // @result t / u template <class T, class U, __CUTE_REQUIRES(is_std_integral<T>::value && is_std_integral<U>::value)> CUTE_HOST_DEVICE constexpr auto safe_div(T const& t, U const& u) { //assert(t % u == 0); return t / u; } /** * log2 computation */ template <class T> CUTE_HOST_DEVICE constexpr int32_t log_2(T x) { assert(x > 0); static_assert(is_unsigned<T>::value, "Only to be used for unsigned integral types."); return static_cast<int32_t>(bit_width(x)) - 1; } } // namespace cute
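// ------------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of this header): compile-time checks of the utilities above.
// The expected values mirror the worked examples given in the comments in this file.
// ------------------------------------------------------------------------------------------------
static_assert(cute::gcd(12u, 18u) == 6u,           "gcd of 12 and 18 is 6");
static_assert(cute::lcm(4u, 6u) == 12u,            "lcm of 4 and 6 is 12");
static_assert(cute::has_single_bit(0b0100u),       "4 is a power of two");
static_assert(!cute::has_single_bit(0b0110u),      "6 is not a power of two");
static_assert(cute::bit_width(0b0111u) == 3u,      "bit_width(0b0111) == 3");
static_assert(cute::bit_ceil(0b0101u)  == 0b1000u, "bit_ceil(0b0101) == 0b1000");
static_assert(cute::bit_floor(0b0110u) == 0b0100u, "bit_floor(0b0110) == 0b0100");
static_assert(cute::shiftl(1u, -2) == 0u,          "a negative shift amount shifts right");
static_assert(cute::log_2(16u) == 4,               "log_2(16) == 4");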
cutlass/include/cute/numeric/math.hpp/0
{ "file_path": "cutlass/include/cute/numeric/math.hpp", "repo_id": "cutlass", "token_count": 3578 }
15
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Sparse matrix multiply accumulate for SM80 */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" ///////////////////////////////////////////////////////////////////////////////////////////////// #if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1)) #define CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED 1 #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)) #define CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED #endif #endif ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { ///////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // // Sparse Matrix Multiply 16832 // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct SparseMma< gemm::GemmShape<16, 8, 32>, 32, half_t, layout::RowMajor, half_t, layout::ColumnMajor, half_t, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread > { using Shape = gemm::GemmShape<16, 8, 32>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 8>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 8>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 2; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); uint32_t const *C = reinterpret_cast<uint32_t const *>(&c); uint32_t *D = reinterpret_cast<uint32_t *>(&d); if (id2 == 0) { asm volatile( "mma.sp.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, " "{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x0;\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E)); } else if (id2 == 1) { asm volatile( "mma.sp.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, " "{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x1;\n" : "=r"(D[0]), "=r"(D[1]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E)); } else { assert(0); } #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct SparseMma< gemm::GemmShape<16, 8, 32>, 32, half_t, layout::RowMajor, half_t, layout::ColumnMajor, float, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread > { using Shape = gemm::GemmShape<16, 8, 32>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 8>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 8>; using ElementC = float; 
using LayoutC = layout::RowMajor; using FragmentC = Array<float, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 2; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); if (id2 == 0) { asm volatile( "mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, " "{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E)); } else if (id2 == 1) { asm volatile( "mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, " "{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E)); } else { assert(0); } #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Sparse Matrix Multiply 16832 - Float BF16, FP32 accumulation // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F32 = bf16 * bf16 + F32 template <> struct SparseMma<gemm::GemmShape<16, 8, 32>, 32, bfloat16_t, layout::RowMajor, bfloat16_t, layout::ColumnMajor, float, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16, 8, 32>; using ElementA = bfloat16_t; using LayoutA = layout::RowMajor; using FragmentA = Array<bfloat16_t, 8>; using ElementB = bfloat16_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<bfloat16_t, 8>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 2; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); if (id2 == 0) { asm volatile( "mma.sp.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 " "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E)); } else if (id2 == 1) { asm volatile( "mma.sp.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 " "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), 
"=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E)); } else { assert(0); } #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Sparse Matrix Multiply 16816 - Float TF32 // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F32 = tf32 * tf32 + F32 template <> struct SparseMma<gemm::GemmShape<16, 8, 16>, 32, tfloat32_t, layout::RowMajor, tfloat32_t, layout::ColumnMajor, float, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16, 8, 16>; using ElementA = tfloat32_t; using LayoutA = layout::RowMajor; using FragmentA = Array<tfloat32_t, 4>; using ElementB = tfloat32_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<tfloat32_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 4; static int const kMaxID2 = 2; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); if (id2 == 0) { asm volatile( "mma.sp.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 " "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E)); } else if (id2 == 1) { asm volatile( "mma.sp.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 " "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E)); } else { assert(0); } #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Sparse Matrix Multiply 16864 - S8 input, S32 accumulation // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: S32 = S8 * S8 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,64>, 32, int8_t, layout::RowMajor, int8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,64>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int8_t, 16>; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int8_t, 16>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add 
CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = S8 * U8 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,64>, 32, int8_t, layout::RowMajor, uint8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,64>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int8_t, 16>; using ElementB = uint8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint8_t, 16>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = U8 * S8 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,64>, 32, uint8_t, layout::RowMajor, int8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,64>; using ElementA = uint8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint8_t, 16>; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int8_t, 16>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = 
reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.s8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = U8 * U8 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,64>, 32, uint8_t, layout::RowMajor, uint8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,64>; using ElementA = uint8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint8_t, 16>; using ElementB = uint8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint8_t, 16>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.u8.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Sparse Matrix Multiply 16864 - S8 input, S32 accumulation - SATURATE // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: S32 = S8 * S8 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,64>, 32, int8_t, layout::RowMajor, int8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,64>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int8_t, 16>; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int8_t, 16>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t 
const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = S8 * U8 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,64>, 32, int8_t, layout::RowMajor, uint8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,64>; using ElementA = int8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<int8_t, 16>; using ElementB = uint8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint8_t, 16>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = U8 * S8 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,64>, 32, uint8_t, layout::RowMajor, int8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,64>; using ElementA = uint8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint8_t, 16>; using ElementB = int8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<int8_t, 16>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if 
(id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = U8 * U8 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,64>, 32, uint8_t, layout::RowMajor, uint8_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,64>; using ElementA = uint8_t; using LayoutA = layout::RowMajor; using FragmentA = Array<uint8_t, 16>; using ElementB = uint8_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<uint8_t, 16>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Sparse Matrix Multiply 168128 - S4 input, S32 accumulation // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: S32 = S4 * S4 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,128>, 32, cutlass::int4b_t, layout::RowMajor, cutlass::int4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,128>; using ElementA = cutlass::int4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<cutlass::int4b_t, 32>; using ElementB = cutlass::int4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<cutlass::int4b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const 
*C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = S4 * U4 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,128>, 32, cutlass::int4b_t, layout::RowMajor, cutlass::uint4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,128>; using ElementA = cutlass::int4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<cutlass::int4b_t, 32>; using ElementB = cutlass::uint4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<cutlass::uint4b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = U4 * S4 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,128>, 32, cutlass::uint4b_t, layout::RowMajor, cutlass::int4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,128>; using ElementA = cutlass::uint4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<cutlass::uint4b_t, 32>; using ElementB = cutlass::int4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<cutlass::int4b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( 
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.s4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = U4 * U4 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,128>, 32, cutlass::uint4b_t, layout::RowMajor, cutlass::uint4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAdd, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,128>; using ElementA = cutlass::uint4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<cutlass::uint4b_t, 32>; using ElementB = cutlass::uint4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<cutlass::uint4b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.u4.s32 {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; //////////////////////////////////////////////////////////////////////////////// // // Sparse Matrix Multiply 168128 - S4 input, S32 accumulation - SATURATE // //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: S32 = S4 * S4 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,128>, 32, cutlass::int4b_t, layout::RowMajor, cutlass::int4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,128>; using ElementA = cutlass::int4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<cutlass::int4b_t, 32>; using ElementB = cutlass::int4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<cutlass::int4b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = 
reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = S4 * U4 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,128>, 32, cutlass::int4b_t, layout::RowMajor, cutlass::uint4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,128>; using ElementA = cutlass::int4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<cutlass::int4b_t, 32>; using ElementB = cutlass::uint4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<cutlass::uint4b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = U4 * S4 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,128>, 32, cutlass::uint4b_t, layout::RowMajor, cutlass::int4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,128>; using ElementA = cutlass::uint4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<cutlass::uint4b_t, 32>; using ElementB = cutlass::int4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<cutlass::int4b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int 
const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; /// Matrix multiply-add operation: S32 = U4 * U4 + S32 template <> struct SparseMma< gemm::GemmShape<16,8,128>, 32, cutlass::uint4b_t, layout::RowMajor, cutlass::uint4b_t, layout::ColumnMajor, int, layout::RowMajor, OpMultiplyAddSaturate, SPFormatType::Thread> { using Shape = gemm::GemmShape<16,8,128>; using ElementA = cutlass::uint4b_t; using LayoutA = layout::RowMajor; using FragmentA = Array<cutlass::uint4b_t, 32>; using ElementB = cutlass::uint4b_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<cutlass::uint4b_t, 32>; using ElementC = int; using LayoutC = layout::RowMajor; using FragmentC = Array<int, 4>; using FragmentE = uint32_t; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm80; static int const kSparse = 2; static int const kMetaSizeInBits = 2; static int const kMaxID2 = 1; /// Computes multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c, uint32_t const &E, int const id2 ) const { #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED) uint32_t const *A = reinterpret_cast<uint32_t const *>(&a); uint32_t const *B = reinterpret_cast<uint32_t const *>(&b); int const *C = reinterpret_cast<int const *>(&c); int *D = reinterpret_cast<int *>(&d); if (id2 == 0) asm volatile( "mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, " "{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E)); else assert(0); #else CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_UNUSED(d); assert(0); #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace arch } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
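// Usage sketch (added for illustration; not part of the upstream header). The
// specializations above are normally driven by CUTLASS's warp-level sparse MMA
// code, which produces the operand fragments and the packed 2-bit metadata word
// in the per-thread layout required by the mma.sp.sync m16n8k64 PTX instruction.
// The device function below only demonstrates how the operator() interface is
// invoked once such fragments exist; the function name and parameters are
// hypothetical.
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
__device__ void sparse_mma_s8_example(
    cutlass::Array<int8_t, 16> const &frag_a,   // compacted (2:4 sparse) A fragment
    cutlass::Array<int8_t, 16> const &frag_b,   // dense B fragment
    cutlass::Array<int, 4> &accum,              // accumulators, passed as both C and D
    uint32_t meta) {                            // 2-bit sparsity metadata for A
  using SparseMmaOp = cutlass::arch::SparseMma<
      cutlass::gemm::GemmShape<16, 8, 64>,
      32,
      int8_t, cutlass::layout::RowMajor,
      int8_t, cutlass::layout::ColumnMajor,
      int, cutlass::layout::RowMajor,
      cutlass::arch::OpMultiplyAdd,
      cutlass::arch::SPFormatType::Thread>;

  SparseMmaOp mma;

  // Only id2 == 0 is supported by this specialization (kMaxID2 == 1).
  mma(accum, frag_a, frag_b, accum, meta, /*id2=*/0);
}
#endif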
cutlass/include/cutlass/arch/mma_sparse_sm80.h/0
{ "file_path": "cutlass/include/cutlass/arch/mma_sparse_sm80.h", "repo_id": "cutlass", "token_count": 20161 }
16
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Enumerated type describing the type of kernel (based on input or output matrices). enum class BlasMode { kGemm, kSymmetric, kHermitian, kTriangular, kInvalid }; /// Enumerated type describing the fill mode for matrices for BLAS functions. enum class FillMode { kFull, /// The entire tensor is covered. kLower, /// The 'lower' part of a tensor is covered, including the diagonal. kUpper, /// The 'upper' part of a tensor is covered, including the diagonal. kDiagonal, /// Only diagonal elements are covered. kNone, /// No element is covered. kInvalid }; /// Enumerated type describing the diagonal property of matrices for BLAS functions. enum class DiagType { kNonUnit, kUnit, kZero, // Only used internally for computing SYMM/HEMM kInvalid }; /// Enumerated type describing the side on which the dense matrix appears in the matrix equation for BLAS functions. enum class SideMode { kLeft, kRight, kInvalid }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
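// Illustrative helper (an assumption added for this write-up; not part of the
// upstream header). When a triangular or symmetric operand is transposed, the
// stored triangle flips, which is how SYMM/HEMM- and TRMM-style kernels reason
// about FillMode; the remaining fill modes are unaffected by transposition.
inline cutlass::FillMode transposed_fill_mode(cutlass::FillMode fill) {
  switch (fill) {
    case cutlass::FillMode::kLower: return cutlass::FillMode::kUpper;  // lower triangle becomes upper
    case cutlass::FillMode::kUpper: return cutlass::FillMode::kLower;  // upper triangle becomes lower
    default:                        return fill;                       // kFull/kDiagonal/kNone/kInvalid unchanged
  }
}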
cutlass/include/cutlass/blas3_types.h/0
{ "file_path": "cutlass/include/cutlass/blas3_types.h", "repo_id": "cutlass", "token_count": 835 }
17
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Template for device-level Depthwise Convolution */ #pragma once #include <limits> #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/conv/convolution.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template<typename DirectConvolutionKernel_> class DirectConvolution { public: using UnderlyingKernel = DirectConvolutionKernel_; using ElementA = typename UnderlyingKernel::ElementA; using LayoutA = typename UnderlyingKernel::LayoutA; using ElementB = typename UnderlyingKernel::ElementB; using LayoutB = typename UnderlyingKernel::LayoutB; using ElementC = typename UnderlyingKernel::ElementC; using LayoutC = typename UnderlyingKernel::LayoutC; using ElementAccumulator = typename UnderlyingKernel::ElementAccumulator; using ElementCompute = typename UnderlyingKernel::ElementCompute; using OperatorClass = typename UnderlyingKernel::OperatorClass; using ArchTag = typename UnderlyingKernel::ArchTag; using ThreadblockShape = typename UnderlyingKernel::ThreadblockShape; using WarpShape = typename UnderlyingKernel::WarpShape; using InstructionShape = typename UnderlyingKernel::InstructionShape; using ThreadblockSwizzle = typename UnderlyingKernel::ThreadblockSwizzle; using EpilogueOutputOp = typename UnderlyingKernel::EpilogueOutputOp; static int const kStages = UnderlyingKernel::kStages; static int const kConvDim = UnderlyingKernel::kConvDim; using WarpMmaOperator = typename UnderlyingKernel::WarpMmaOperator; using ArchMmaOperator = typename UnderlyingKernel::ArchMmaOperator; using 
MathOperator = typename UnderlyingKernel::MathOperator; static cutlass::conv::Operator const kConvolutionalOperator = UnderlyingKernel::kConvolutionalOperator; static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = UnderlyingKernel::kIteratorAlgorithm; static cutlass::conv::StrideSupport const kStrideSupport = UnderlyingKernel::kStrideSupport; static cutlass::conv::GroupMode const kGroupMode = UnderlyingKernel::kGroupMode; static int const kWarpCount = (ThreadblockShape::kM / WarpShape::kM) * (ThreadblockShape::kN / WarpShape::kN) * (ThreadblockShape::kK / WarpShape::kK); /// Argument structure using Arguments = typename UnderlyingKernel::Arguments; using ReorderKernel = typename UnderlyingKernel::ReorderKernel; private: /// Kernel parameters object typename UnderlyingKernel::Params params_; public: /// Constructs Implicit GEMM DirectConvolution() { } /// Determines whether the Implicit GEMM can execute the given problem. static Status can_implement(Arguments const &args) { // dispatch to iterators Status status = UnderlyingKernel::Mma::IteratorA::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } status = UnderlyingKernel::Mma::IteratorB::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } if (kGroupMode != conv::GroupMode::kDepthwise) { return Status::kErrorInvalidProblem; } // C and K should be multiple of groups if (args.problem_size.K != args.problem_size.groups && args.problem_size.C != args.problem_size.groups) { return Status::kErrorInvalidProblem; } static int const kAlignmentC = UnderlyingKernel::Epilogue::OutputTileIterator::kElementsPerAccess; if (kConvolutionalOperator == conv::Operator::kFprop) { if (args.problem_size.K % kAlignmentC) return Status::kErrorMisalignedOperand; } else if (kConvolutionalOperator == conv::Operator::kDgrad) { if (args.problem_size.C % kAlignmentC) return Status::kErrorMisalignedOperand; } else if (kConvolutionalOperator == conv::Operator::kWgrad) { if (args.problem_size.C % kAlignmentC) return Status::kErrorMisalignedOperand; } // Determine grid shape ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape( threadblock_swizzle.get_tiled_shape( kConvolutionalOperator, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices)); if (!(grid.y <= std::numeric_limits<uint16_t>::max() && grid.z <= std::numeric_limits<uint16_t>::max())) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { return 0; } /// Initializes GEMM state from arguments. Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // initialize the params structure from the arguments params_ = typename UnderlyingKernel::Params( args, static_cast<int *>(workspace) ); int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Initializes GEMM state from arguments. 
Status update(Arguments const &args, void *workspace = nullptr) { // update the params structure from the arguments params_.ptr_A = args.ref_A.data(); params_.ptr_B = args.ref_B.data(); params_.ptr_C = args.ref_C.data(); params_.ptr_D = args.ref_D.data(); params_.output_op = args.output_op; params_.ptr_reordered_B = args.ref_reordered_B.data(); params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { // Launch reorder kernel if (params_.ptr_reordered_B != nullptr) { dim3 grid = ReorderKernel::get_grid_shape(params_); dim3 block = ReorderKernel::get_block_shape(); cutlass::Kernel<ReorderKernel><<<grid, block, 0, stream>>>(params_); } // Launch main kernel ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(32 * kWarpCount, 1, 1); // Dynamic SMEM size based on input params. int smem_size = int(params_.get_smem_size()); // Make sure we can use that much shared memory. cudaError_t status = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (status != cudaSuccess) return Status::kErrorInternal; cutlass::Kernel<UnderlyingKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Initializes state from the arguments, then runs the kernel. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } int get_smem_size() { return int(params_.get_smem_size()); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
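// Host-side call-flow sketch (added for illustration; not part of the upstream
// header). The kernel type is left as a template parameter because building a
// concrete depthwise kernel requires the default kernel builders from other
// headers; the helper name and structure are assumptions, but the member
// functions used below are exactly those declared by DirectConvolution.
template <typename DirectConvolutionKernel>
cutlass::Status run_direct_convolution(
    typename cutlass::conv::device::DirectConvolution<DirectConvolutionKernel>::Arguments const &args,
    void *workspace = nullptr,
    cudaStream_t stream = nullptr) {

  using DeviceOp = cutlass::conv::device::DirectConvolution<DirectConvolutionKernel>;

  // Reject unsupported problem sizes / alignments before touching device state.
  cutlass::Status status = DeviceOp::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // get_workspace_size() currently returns 0 for this operator; the call is kept
  // to mirror the usual CUTLASS host-side pattern.
  (void)DeviceOp::get_workspace_size(args);

  DeviceOp op;
  status = op.initialize(args, workspace, stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // Launches the optional filter-reorder kernel followed by the main kernel.
  return op.run(stream);
}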
cutlass/include/cutlass/conv/device/direct_convolution.h/0
{ "file_path": "cutlass/include/cutlass/conv/device/direct_convolution.h", "repo_id": "cutlass", "token_count": 3160 }
18
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kUnity > struct DefaultConv3dFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Analytic Iterator Algorithm /// and 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Analytic IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Optimized Iterator Algorithm /// and 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Optimized IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount, false, layout::NoPermute, StrideSupport, 5 >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassSimt convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Analytic IteratorAlgorithm, /// multi-stage pipeline, and FFMA-based mainloop for SM80 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport > { // Define the core components from GEMM using MmaCore = typename 
cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount, false, layout::NoPermute, StrideSupport, 5 >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Optimized IteratorAlgorithm, /// multi-stage pipeline, and FFMA-based mainloop for SM80 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename 
MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount, false, layout::NoPermute, StrideSupport, 5 >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Analytic IteratorAlgorithm, /// 2 stage pipeline, and FFMA-based mainloop for SM50 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount, false, layout::NoPermute, StrideSupport, 5 >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop 
specialization for Optimized IteratorAlgorithm, /// 2 stage pipeline, and FFMA-based mainloop for SM50 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount, false, layout::NoPermute, StrideSupport, 5 >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
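// Instantiation sketch (added for illustration; not part of the upstream
// header). A representative way to turn one of the specializations above into a
// runnable device-level operator: the data types, tile shapes, epilogue vector
// width, and stage count below are assumptions chosen to resemble a common SM80
// Tensor Core configuration, and the extra includes are needed because this
// header does not pull in the device-level wrapper or the epilogue functor.
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

using Conv3dFpropKernel = cutlass::conv::kernel::DefaultConv3dFprop<
    cutlass::half_t, cutlass::layout::TensorNDHWC,   // ElementA, LayoutA
    cutlass::half_t, cutlass::layout::TensorNDHWC,   // ElementB, LayoutB
    cutlass::half_t, cutlass::layout::TensorNDHWC,   // ElementC, LayoutC
    float,                                           // ElementAccumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,          // threadblock tile
    cutlass::gemm::GemmShape<64, 64, 32>,            // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,             // Tensor Core instruction
    cutlass::epilogue::thread::LinearCombination<
        cutlass::half_t, 8, float, float>,           // output op: D = alpha * acc + beta * C
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                               // pipeline stages
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;

// The resulting kernel type is wrapped by the generic implicit-GEMM device operator.
using Conv3dFpropOp = cutlass::conv::device::ImplicitGemmConvolution<Conv3dFpropKernel>;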
cutlass/include/cutlass/conv/kernel/default_conv3d_fprop.h/0
{ "file_path": "cutlass/include/cutlass/conv/kernel/default_conv3d_fprop.h", "repo_id": "cutlass", "token_count": 9263 }
19
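The DefaultConv3dFprop specializations above only assemble types; a usable kernel comes from instantiating one of them and taking its nested ::Kernel alias. The following is a minimal, illustrative sketch of the SIMT, 2-stage, analytic-iterator path shown in this header. The tile shapes, the LinearCombination epilogue, and the GemmIdentityThreadblockSwizzle are assumptions chosen for the example, not values prescribed by the header.

// Illustrative only: instantiate the SIMT / 2-stage / analytic specialization defined above.
#include "cutlass/conv/kernel/default_conv3d_fprop.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

using ElementA   = float;
using ElementB   = float;
using ElementC   = float;
using ElementAcc = float;

// Epilogue: D = alpha * Acc + beta * C, one element per access (assumed for the sketch).
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<ElementC, 1, ElementAcc, ElementAcc>;

using DefaultKernel = cutlass::conv::kernel::DefaultConv3dFprop<
    ElementA, cutlass::layout::TensorNDHWC,
    ElementB, cutlass::layout::TensorNDHWC,
    ElementC, cutlass::layout::TensorNDHWC,
    ElementAcc,
    cutlass::arch::OpClassSimt,
    cutlass::arch::Sm50,
    cutlass::gemm::GemmShape<128, 128, 8>,   // threadblock tile (assumed)
    cutlass::gemm::GemmShape<32, 64, 8>,     // warp tile (assumed)
    cutlass::gemm::GemmShape<1, 1, 1>,       // SIMT "instruction" shape
    EpilogueOp,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    2,                                       // stages, matching the 2-stage specialization above
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::IteratorAlgorithm::kAnalytic,
    cutlass::conv::StrideSupport::kStrided>;

// The nested ::Kernel is what a device-level wrapper would ultimately launch.
using Conv3dFpropKernel = DefaultKernel::Kernel;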
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Templates exposing architecture support for depthwise convolution */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/matrix.h" #include "cutlass/arch/mma.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/thread/mma.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// MMA operation template < /// Size of the matrix product (concept: GemmShape) typename Shape_, /// Number of threads participating int kThreads_, /// Data type of A elements typename ElementA, /// Data type of B elements typename ElementB, /// Element type of C matrix typename ElementC, /// Inner product operator typename Operator > struct ElementwiseInnerProduct; ///////////////////////////////////////////////////////////////////////////////////////////////// /// General implementation template < /// Size of the matrix product (concept: GemmShape) typename Shape_, /// Data type of A elements typename ElementA_, /// Data type of B elements typename ElementB_, /// Element type of C matrix typename ElementC_> struct ElementwiseInnerProduct<Shape_, 1, ElementA_, ElementB_, ElementC_, arch::OpMultiplyAdd> { using Shape = Shape_; using Operator = arch::OpMultiplyAdd; using ElementC = ElementC_; CUTLASS_HOST_DEVICE void operator()(Array<ElementC_, Shape::kN> &d, Array<ElementA_, Shape::kN> const &a, Array<ElementB_, Shape::kN> const &b, Array<ElementC_, Shape::kN> const &c) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Shape::kN; ++i) { d[i] = a[i] * b[i] + c[i]; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization of half_t template <> struct ElementwiseInnerProduct< gemm::GemmShape<2, 2, 1>, 1, half_t, half_t, half_t, arch::OpMultiplyAdd> { using Shape = gemm::GemmShape<2, 2, 1>; using Operator = arch::OpMultiplyAdd; using ElementC = half_t; CUTLASS_HOST_DEVICE void operator()( Array<half_t, 2> &d, Array<half_t, 2> const &a, Array<half_t, 2> const &b, Array<half_t, 2> const &c ) { #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)) __half2 const & A = reinterpret_cast<__half2 const &>(a); __half2 const & B = reinterpret_cast<__half2 const &>(b); __half2 const & C = reinterpret_cast<__half2 const &>(c); __half2 tmp_D = __hfma2(A, B, C); d = reinterpret_cast<Array<half_t, 2> const &>(tmp_D); #else CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { d[i] = a[i] * b[i] + c[i]; } #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape, /// Data type of A elements typename ElementA, /// Data type of B elements typename ElementB, /// Element type of C matrix typename ElementC, /// Concept: arch::OpMultiplyAdd or arch::Mma<> typename Operator = arch::OpMultiplyAdd, /// Used for partial specialization typename Enable = bool > struct DepthwiseDirectConvElementwiseInnerProduct; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Gemplate that handles all packed matrix layouts template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Data type of B elements typename ElementB_, /// Element type 
of C matrix typename ElementC_, /// Operator used to compute GEMM typename Operator_ > struct DepthwiseDirectConvElementwiseInnerProductGeneric { /// Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; /// Data type of operand A using ElementA = ElementA_; /// Data type of operand B using ElementB = ElementB_; /// Element type of operand C using ElementC = ElementC_; /// Underlying mathematical operator using Operator = Operator_; /// A operand storage using FragmentA = Array<ElementA, Shape::kMN>; /// B operand storage using FragmentB = Array<ElementB, Shape::kN>; /// C operand storage using FragmentC = Array<ElementC, Shape::kMN>; /// Instruction using MmaOp = cutlass::conv::thread::ElementwiseInnerProduct< gemm::GemmShape<Shape::kN, Shape::kN, 1>, 1, ElementA, ElementB, ElementC, Operator>; // // Methods // /// Computes a matrix product D = A * B + C CUTLASS_HOST_DEVICE void operator()( FragmentC & D, FragmentA const & A, FragmentB const & B, FragmentC const & C) { Array<ElementC, Shape::kN> *ptr_D = reinterpret_cast<Array<ElementC, Shape::kN> *>(&D); Array<ElementA, Shape::kN> const *ptr_A = reinterpret_cast<Array<ElementA, Shape::kN> const *>(&A); Array<ElementB, Shape::kN> const *ptr_B = reinterpret_cast<Array<ElementB, Shape::kN> const *>(&B); MmaOp mma_op; // Copy accumulators D = C; // Compute matrix product CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Shape::kN / MmaOp::Shape::kN; ++n) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Shape::kM; ++m) { Array<ElementC, MmaOp::Shape::kN> tmpD = ptr_D[m * Shape::kN / MmaOp::Shape::kN + n]; Array<ElementA, MmaOp::Shape::kN> tmpA = ptr_A[m * Shape::kN / MmaOp::Shape::kN + n]; Array<ElementB, MmaOp::Shape::kN> tmpB = ptr_B[n]; mma_op(tmpD, tmpA, tmpB, tmpD); ptr_D[m * Shape::kN / MmaOp::Shape::kN + n] = tmpD; } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Data type of B elements typename ElementB_, /// Element type of C matrix typename ElementC_ > struct DepthwiseDirectConvElementwiseInnerProduct< Shape_, ElementA_, ElementB_, ElementC_, arch::OpMultiplyAdd > { /// Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; /// Data type of operand A using ElementA = ElementA_; /// Data type of operand B using ElementB = ElementB_; /// Element type of operand C using ElementC = ElementC_; /// Underlying mathematical operator using Operator = arch::OpMultiplyAdd; /// A operand storage using FragmentA = Array<ElementA, Shape::kMN>; // output_tile_size per thread * groups_per_thread /// B operand storage using FragmentB = Array<ElementB, Shape::kN>; // 1 * groups_per_thread /// C operand storage using FragmentC = Array<ElementC, Shape::kMN>; // output_tile_size per thread * groups_per_thread static bool const use_optimized = 0; using ArchMmaOperator = DepthwiseDirectConvElementwiseInnerProductGeneric<Shape, ElementA, ElementB, ElementC, Operator>; // // Methods // /// Computes a matrix product D = A * B + C CUTLASS_HOST_DEVICE void operator()( FragmentC & D, FragmentA const & A, FragmentB const & B, FragmentC const & C) { ArchMmaOperator mma; mma(D, A, B, C); } }; } // namespace thread } // namespace conv } // namespace cutlass
cutlass/include/cutlass/conv/thread/depthwise_mma.h/0
{ "file_path": "cutlass/include/cutlass/conv/thread/depthwise_mma.h", "repo_id": "cutlass", "token_count": 3403 }
20
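As a quick illustration of the thread-level contract above: the single-thread OpMultiplyAdd specialization of ElementwiseInnerProduct computes d[i] = a[i] * b[i] + c[i] across the N extent of its GemmShape. Because the operator is CUTLASS_HOST_DEVICE, a host-side sketch is possible; the float element type and the 1x4x1 shape below are arbitrary choices for the example.

#include "cutlass/conv/thread/depthwise_mma.h"

// Illustrative check of the elementwise inner product: d = a * b + c, lane by lane.
void elementwise_inner_product_example() {
  using Shape = cutlass::gemm::GemmShape<1, 4, 1>;                 // Shape::kN = 4 lanes
  using Op    = cutlass::conv::thread::ElementwiseInnerProduct<
      Shape, /*kThreads=*/1, float, float, float, cutlass::arch::OpMultiplyAdd>;

  cutlass::Array<float, Shape::kN> a, b, c, d;
  for (int i = 0; i < Shape::kN; ++i) {
    a[i] = float(i + 1);   // 1, 2, 3, 4
    b[i] = 2.0f;
    c[i] = 0.5f;
  }

  Op op;
  op(d, a, b, c);           // d[i] = a[i] * b[i] + c[i]  ->  2.5, 4.5, 6.5, 8.5
}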
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Extracts the host-params objects into non-template code. 
*/ #pragma once #define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0 #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED #include <fstream> #endif ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized template<typename Layout_ = layout::TensorNHWC > struct Depthwise2dFpropDirectConvParams; /// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation template<typename Layout_ = layout::TensorNHWC > struct Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams; /// Parameters structure used for DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized template<typename Layout_ = layout::TensorNHWC > struct Depthwise2dFpropDirectConvFilterIteratorParams; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized template<> struct Depthwise2dFpropDirectConvParams<layout::TensorNHWC> { using Layout = layout::TensorNHWC; Layout layout; int32_t activation_tile_h; int32_t activation_tile_w; int32_t activation_tile_hw; FastDivmod activation_tile_w_divmod; int filter[2]; int stride[2]; int dilation[2]; int inc_next[2]; FastDivmod pq_divmod; FastDivmod q_divmod; int activation_load_count; int activation_storage_elements; int activation_size; // // Methods // CUTLASS_HOST_DEVICE Depthwise2dFpropDirectConvParams() { } CUTLASS_HOST_DEVICE Depthwise2dFpropDirectConvParams( Conv2dProblemSize const &problem_size, Layout const &layout, ///< layout object MatrixCoord threadblock_shape, ///< CTA threadblock Shape Layout::TensorCoord threadblock_output_shape, ///< Output tile Shape per threadblock const int element_size_bits, ///< bits of activation element const int thread_count, ///< threads per threadblock const int thread_count_contiguous, ///< number of threads for continuous dimension const int element_per_load) ///< element per each load : layout(layout) { filter[0] = problem_size.S; filter[1] = problem_size.R; stride[0] = problem_size.stride_w; stride[1] = problem_size.stride_h; dilation[0] = problem_size.dilation_w; dilation[1] = problem_size.dilation_h; // Compute activation_tile size per threadblock because stride and dilation are runtime params. 
activation_tile_h = (threadblock_output_shape.h() - 1) * problem_size.stride_h + (problem_size.R - 1) * problem_size.dilation_h + 1; activation_tile_w = (threadblock_output_shape.w() - 1) * problem_size.stride_w + (problem_size.S - 1) * problem_size.dilation_w + 1; activation_tile_hw = activation_tile_h * activation_tile_w; activation_tile_w_divmod = FastDivmod(activation_tile_w); /// Below two values could not be templatized because the stride and dilation are runtime params activation_load_count = (thread_count_contiguous * activation_tile_hw + (thread_count - 1)) / thread_count; activation_storage_elements = activation_load_count * element_per_load * thread_count; activation_size = activation_storage_elements * element_size_bits / 8; // Fastdivmod for output P, Q int tiles_p = (problem_size.P + (threadblock_output_shape.h() - 1)) / (threadblock_output_shape.h()); int tiles_q = (problem_size.Q + (threadblock_output_shape.w() - 1)) / (threadblock_output_shape.w()); pq_divmod = FastDivmod(tiles_p * tiles_q); q_divmod = FastDivmod(tiles_q); // next S inc_next[0] = problem_size.dilation_w; // next R inc_next[1] = (activation_tile_w * problem_size.dilation_h - (problem_size.S - 1) * problem_size.dilation_w); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation template <> struct Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams<layout::TensorNHWC> { using Layout = layout::TensorNHWC; Layout layout; FastDivmod pq_divmod; FastDivmod q_divmod; int activation_size; // // Methods // CUTLASS_HOST_DEVICE Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams() {} CUTLASS_HOST_DEVICE Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams( Conv2dProblemSize const &problem_size, Layout const &layout, ///< Layout object MatrixCoord threadblock_shape, ///< Threadblock Shape Layout::TensorCoord threadblock_output_shape, ///< Output tile Shape per threadblock const int activation_size_ ///< Activation size loaded by iterator ) : layout(layout), activation_size(activation_size_) { // Fastdivmod for output P, Q int tiles_p = (problem_size.P + (threadblock_output_shape.h() - 1)) / (threadblock_output_shape.h()); int tiles_q = (problem_size.Q + (threadblock_output_shape.w() - 1)) / (threadblock_output_shape.w()); pq_divmod = FastDivmod(tiles_p * tiles_q); q_divmod = FastDivmod(tiles_q); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure used for DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized template <> struct Depthwise2dFpropDirectConvFilterIteratorParams<layout::TensorNHWC> { using Layout = layout::TensorNHWC; Layout layout; int filter_size; bool is_convolution; // // Methods // CUTLASS_HOST_DEVICE Depthwise2dFpropDirectConvFilterIteratorParams() {} CUTLASS_HOST_DEVICE Depthwise2dFpropDirectConvFilterIteratorParams( Conv2dProblemSize const &problem_size, Layout const &layout, ///< Layout object MatrixCoord threadblock_shape, ///< Threadblock Shape const int filter_size_) ///< Filter size loaded by iterator : layout(layout), filter_size(filter_size_), is_convolution(problem_size.mode == Mode::kConvolution){} }; } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/conv/threadblock/depthwise_direct_conv_params.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/depthwise_direct_conv_params.h", "repo_id": "cutlass", "token_count": 2901 }
21
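These params objects are normally filled in by kernel Arguments/Params plumbing, but they are plain CUTLASS_HOST_DEVICE structs and can be constructed directly. The sketch below builds a Depthwise2dFpropDirectConvParams for an assumed depthwise problem; the problem dimensions, the threadblock and output-tile shapes, and the thread/load parameters are invented for illustration and are not values prescribed by this header.

#include "cutlass/conv/threadblock/depthwise_direct_conv_params.h"

// Illustrative host-side construction (all numbers are made up for the example).
void build_depthwise_params_example() {
  // Depthwise fprop: N=1, H=W=56, C=K=64 (groups == C), 3x3 filter, stride 1, dilation 1.
  cutlass::conv::Conv2dProblemSize problem_size(
      {1, 56, 56, 64},        // input  NHWC
      {64, 3, 3, 1},          // filter KRSC (one channel per group for depthwise)
      {1, 1, 1, 1},           // padding
      {1, 1},                 // stride (h, w)
      {1, 1},                 // dilation (h, w)
      cutlass::conv::Mode::kCrossCorrelation,
      /*split_k_slices=*/1,
      /*groups=*/64);

  cutlass::layout::TensorNHWC layout =
      cutlass::layout::TensorNHWC::packed({1, 56, 56, 64});

  cutlass::conv::threadblock::Depthwise2dFpropDirectConvParams<cutlass::layout::TensorNHWC>
      params(problem_size,
             layout,
             /*threadblock_shape=*/{64, 64},              // MatrixCoord, assumed
             /*threadblock_output_shape=*/{1, 8, 8, 64},  // N,H,W,C output tile, assumed
             /*element_size_bits=*/16,
             /*thread_count=*/128,
             /*thread_count_contiguous=*/16,
             /*element_per_load=*/4);

  // params.activation_size now reports the shared-memory bytes needed for one activation tile,
  // computed at runtime because stride and dilation are runtime parameters.
}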
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Describes the lane policy used by warp-level matrix multiply operators targeting SIMT instructions */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/conv/convolution.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma_simt_policy.h" #include "cutlass/gemm/warp/mma_simt_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Iterates over operands to warp-level matrix multiply operations targeting SIMT instructions /// /// concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Operand identity cutlass::gemm::Operand Operand, /// Data type of A elements typename Element_, /// Layout of operand typename Layout_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension - used in sliced-K int PartitionsK = 1, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize = 1 > class DepthwiseMmaSimtTileIterator; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for B operands of row-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension int PartitionsK, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize> class DepthwiseMmaSimtTileIterator<Shape_, cutlass::gemm::Operand::kB, Element_, layout::RowMajor, Policy_, PartitionsK, PartitionGroupSize> : public cutlass::gemm::warp::MmaSimtTileIterator<Shape_, cutlass::gemm::Operand::kB, Element_, layout::RowMajor, Policy_, PartitionsK, PartitionGroupSize> { using Base = cutlass::gemm::warp::MmaSimtTileIterator<Shape_, cutlass::gemm::Operand::kB, Element_, layout::RowMajor, Policy_, PartitionsK, PartitionGroupSize>; public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kB; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::RowMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = typename Base::TensorRef; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Thread-level shape of a fragment using ThreadShape = typename Base::ThreadShape; /// Number of individual loads using Iterations = typename Base::Iterations; /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; static_assert(Policy::LaneMmaShape::kN == 1, "Each thread should be 1 element per LDS along the k-dim"); private: MatrixCoord lane_offset_; int channel_idx_; int base_channel_idx_; int warps_n_; 
public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE DepthwiseMmaSimtTileIterator():Base() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE DepthwiseMmaSimtTileIterator( TensorRef ref, int lane_id ) : Base(ref, lane_id) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); warps_n_ = -1; channel_idx_ = 0; base_channel_idx_ = 0; lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN); } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE DepthwiseMmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { if(warps_n_ == -1){ warps_n_ = coord.column(); } Base::add_tile_offset(coord); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. (vector loads) CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kN> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kRow; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { void const *ptr = this->ref_.data() + this->ref_.offset({-(channel_idx_ - base_channel_idx_), n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN; // Base_k of a warp + Base_k of current threads. int thread_k_base_idx = warps_n_ * Shape::kColumn / Policy::LaneMmaShape::kN + lane_offset_.column(); if (channel_idx_ + k == thread_k_base_idx + n * Policy::WarpShape::kColumn) { // Depthwise kernel would only do computation when channel == k. // Loads an element when the current computation channel == the k corresponding to this thread. arch::shared_load(dst_ptr[n + k * Iterations::kColumn], ptr); } else { // Reduce SMEM load dst_ptr[n + k * Iterations::kColumn].fill(Element(0)); } } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. 
Rather, it overrides its internal /// tracking with constant-valued k-group index CUTLASS_DEVICE void set_kgroup_index(int k_group) { if(k_group % PartitionGroupSize == 0 && k_group != 0){ base_channel_idx_ = k_group; } channel_idx_ = k_group; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>) typename FilterShape_, /// Size of the matrix to load (concept: MatrixShape) typename ThreadOutputShape_, /// Size of the matrix to load (concept: MatrixShape) typename ThreadBlockOutputShape_, /// Operand identity cutlass::gemm::Operand Operand, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Iterator algo type conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic, /// Stride ( MatrixShape<Height, Width> ) typename StrideShape = cutlass::MatrixShape<-1, -1>, /// Dilation ( MatrixShape<Height, Width> ) typename DilationShape = cutlass::MatrixShape<-1, -1>, /// Activation Shape loaded by threadblock typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>, /// Number of partitions along K dimension - used in sliced-K int PartitionsK = 1, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize = 1> class DepthwiseDirect2dConvSimtTileIterator; /// Specialization for A operands of row-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>) typename FilterShape_, /// Size of the matrix to load (concept: TensorNHWC) typename ThreadOutputShape_, /// Size of the matrix to load (concept: TensorNHWC) typename ThreadBlockOutputShape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Iterator algo type conv::IteratorAlgorithm IteratorAlgorithm, /// Stride ( MatrixShape<Height, Width> ) typename StrideShape, /// Dilation ( MatrixShape<Height, Width> ) typename DilationShape, /// Activation Shape loaded by threadblock typename ActivationShape, /// Number of partitions along K dimension - used in sliced-K int PartitionsK, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize> class DepthwiseDirect2dConvSimtTileIterator<Shape_, FilterShape_, ThreadOutputShape_, ThreadBlockOutputShape_, cutlass::gemm::Operand::kA, Element_, Policy_, IteratorAlgorithm, StrideShape, DilationShape, ActivationShape, PartitionsK, PartitionGroupSize> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Shape of filter (concept: gemm::GemmShape<Depth, Height, Width>) using FilterShape = FilterShape_; /// Shape of tile to load (concept: TensorNHWC) using ThreadOutputShape = ThreadOutputShape_; /// Shape of tile to load (concept: TensorNHWC) using ThreadBlockOutputShape = ThreadBlockOutputShape_; /// Operand tag static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kA; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::RowMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename 
TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; // // Derived quantities // static_assert(!(Shape::kRow % Policy::WarpShape::kRow), "The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); // Thread-level shape of a fragment using ThreadShape = MatrixShape< ThreadOutputShape::kNHW, // Output tile shape Computed by current threads ThreadOutputShape::kC >; static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); /// Number of individual loads using Iterations = MatrixShape< ThreadShape::kRow, ThreadShape::kColumn / Policy::LaneMmaShape::kN >; using ThreadTileCount = MatrixShape< ThreadBlockOutputShape::kH / ThreadOutputShape::kH, ThreadBlockOutputShape::kW / ThreadOutputShape::kW >; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; protected: /// Internal reference cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_; int activation_offset[ThreadOutputShape::kH][ThreadOutputShape::kW][Iterations::kColumn]; int iterator_r_; int iterator_s_; int iterator_offset_; int inc_next_s_ ; int inc_next_r_ ; MatrixCoord lane_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator( TensorRef ref, int lane_id ) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); // Set channel offset lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN); ref.add_coord_offset(lane_offset_); ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()), ref.stride(0) / Policy::LaneMmaShape::kN); iterator_r_ = 0; iterator_s_ = 0; iterator_offset_ = 0; } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
template<typename Params> CUTLASS_HOST_DEVICE void setup_initial_status(Params const& params) { inc_next_s_ = params.inc_next[0]; inc_next_r_ = params.inc_next[1]; // Get base HW offset of current threads int threadgroup = threadIdx.x / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC); int base_p_ = (threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH; int base_q_ = (threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW; CUTLASS_PRAGMA_UNROLL for (int p = 0; p < ThreadOutputShape::kH; ++p) { CUTLASS_PRAGMA_UNROLL for (int q = 0; q < ThreadOutputShape::kW; ++q) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < Iterations::kColumn; ++col) { int base_w = (base_q_ + q) * params.stride[0]; int base_h = (base_p_ + p) * params.stride[1]; int offset = base_h * params.activation_tile_w + base_w; activation_offset[p][q][col] = offset; } } } } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator &add_tile_offset(TensorCoord const &coord) { // Set warp row and col start lane_offset_ = MatrixCoord({lane_offset_.row() + coord.row() * Shape::kRow, lane_offset_.column()}); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE void advance(int32_t pointer_offset) { ref_.reset(ref_.data() + pointer_offset / sizeof(Element) / Policy::LaneMmaShape::kN); iterator_s_ = 0; iterator_r_ = 0; iterator_offset_ = 0; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator &operator++() { ++iterator_s_; if (iterator_s_ < FilterShape::kColumn) { iterator_offset_ += inc_next_s_; return *this; } iterator_s_ = 0; ++iterator_r_; if (iterator_r_ < FilterShape::kRow) { iterator_offset_ += inc_next_r_; return *this; } iterator_r_ = 0; iterator_offset_ = 0; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator & operator--() { // Do nothing return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. (vector loads) CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kN> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int p = 0; p < ThreadOutputShape::kH; ++p) { CUTLASS_PRAGMA_UNROLL for (int q = 0; q < ThreadOutputShape::kW; ++q) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { void const *ptr = ref_.data() + ref_.offset({activation_offset[p][q][n] + (iterator_offset_), n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN; arch::shared_load(dst_ptr[n + q + p * ThreadOutputShape::kW], ptr); } } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { // Do nothing at present. 
} /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag, Index pointer_offset) const { store_with_pointer_offset(frag, 0); } CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for A operands of row-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>) typename FilterShape_, /// Size of the matrix to load (concept: TensorNHWC) typename ThreadOutputShape_, /// Size of the matrix to load (concept: TensorNHWC) typename ThreadBlockOutputShape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Stride ( MatrixShape<Height, Width> ) typename StrideShape_, /// Dilation ( MatrixShape<Height, Width> ) typename DilationShape_, /// Activation Shape loaded by threadblock typename ActivationShape_, /// Number of partitions along K dimension - used in sliced-K int PartitionsK, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize> class DepthwiseDirect2dConvSimtTileIterator<Shape_, FilterShape_, ThreadOutputShape_, ThreadBlockOutputShape_, cutlass::gemm::Operand::kA, Element_, Policy_, IteratorAlgorithm::kFixedStrideDilation, StrideShape_, DilationShape_, ActivationShape_, PartitionsK, PartitionGroupSize> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Shape of filter (concept: gemm::GemmShape<Depth, Height, Width>) using FilterShape = FilterShape_; /// Shape of tile to load (concept: TensorNHWC) using ThreadOutputShape = ThreadOutputShape_; /// Shape of tile to load (concept: TensorNHWC) using ThreadBlockOutputShape = ThreadBlockOutputShape_; /// Stride ( MatrixShape<Height, Width> ) using StrideShape = StrideShape_; /// Dilation ( MatrixShape<Height, Width> ) using DilationShape = DilationShape_; /// Activation Shape loaded by threadblock using ActivationShape = ActivationShape_; /// Operand tag static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kA; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::RowMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; // // Derived quantities // static_assert(!(Shape::kRow % Policy::WarpShape::kRow), "The warp-level GEMM M size must be divisible by the number of threads arranged " "along the M dimension."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); // Activations loaded by threadblock static int const ThreadActivationShapeH = (ThreadOutputShape::kH - 1) * StrideShape::kRow + (FilterShape::kRow - 1) * 
DilationShape::kRow + 1; static int const ThreadActivationShapeW = (ThreadOutputShape::kW - 1) * StrideShape::kColumn + (FilterShape::kColumn - 1) * DilationShape::kColumn + 1; using ThreadActivationShape = cutlass::conv:: TensorNHWCShape<1, ThreadActivationShapeH, ThreadActivationShapeW, ThreadOutputShape::kC>; // Thread-level shape of a fragment using ThreadShape = MatrixShape<ThreadOutputShape::kNHW, ThreadOutputShape::kC>; static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); /// Number of individual loads using Iterations = MatrixShape<ThreadShape::kRow, ThreadShape::kColumn / Policy::LaneMmaShape::kN>; using ThreadTileCount = MatrixShape<ThreadBlockOutputShape::kH / ThreadOutputShape::kH, ThreadBlockOutputShape::kW / ThreadOutputShape::kW>; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; protected: /// Internal reference cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_; Array<Element, Policy::LaneMmaShape::kN> activation[ThreadActivationShape::kH][ThreadActivationShape::kW][Iterations::kColumn]; int iterator_r_; int iterator_s_; MatrixCoord lane_offset_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator() {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator(TensorRef ref, int lane_id) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); // Set channel offset lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN); ref.add_coord_offset(lane_offset_); ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()), ref.stride(0) / Policy::LaneMmaShape::kN); iterator_r_ = 0; iterator_s_ = 0; } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
template <typename Params> CUTLASS_HOST_DEVICE void setup_initial_status( Params const &params) { // Get base HW offset of current threads int threadgroup = threadIdx.x / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC); int base_h = (threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH * StrideShape::kRow; int base_w = (threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW * StrideShape::kColumn; CUTLASS_PRAGMA_UNROLL for (int h = 0; h < ThreadActivationShape::kH; ++h) { CUTLASS_PRAGMA_UNROLL for (int w = 0; w < ThreadActivationShape::kW; ++w) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < Iterations::kColumn; ++col) { int offset = (base_h + h) * ActivationShape::kW + (base_w + w); void const *ptr = ref_.data() + ref_.offset({offset, col * Policy::WarpShape::kColumn}); arch::shared_load(activation[h][w][col], ptr); } } } } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator &add_tile_offset(TensorCoord const &coord) { // Set warp row and col start lane_offset_ = MatrixCoord({lane_offset_.row() + coord.row() * Shape::kRow, lane_offset_.column()}); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE void advance(int32_t pointer_offset) { ref_.reset(ref_.data() + pointer_offset / sizeof(Element) / Policy::LaneMmaShape::kN); iterator_s_ = 0; iterator_r_ = 0; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator &operator++() { ++iterator_s_; if (iterator_s_ < FilterShape::kColumn) { return *this; } iterator_s_ = 0; ++iterator_r_; if (iterator_r_ < FilterShape::kRow) { return *this; } iterator_r_ = 0; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE DepthwiseDirect2dConvSimtTileIterator &operator--() { // Do nothing return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. (vector loads) CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kN> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int p = 0; p < ThreadOutputShape::kH; ++p) { CUTLASS_PRAGMA_UNROLL for (int q = 0; q < ThreadOutputShape::kW; ++q) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { const int h = p * StrideShape::kRow + iterator_r_ * DilationShape::kRow; const int w = q * StrideShape::kColumn + iterator_s_ * DilationShape::kColumn; dst_ptr[n + q + p * ThreadOutputShape::kW] = activation[h][w][n]; } } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { // Do nothing at present. } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag, Index pointer_offset) const { store_with_pointer_offset(frag, 0); } CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; } // namespace warp } // namespace conv } // namespace cutlass
cutlass/include/cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h/0
{ "file_path": "cutlass/include/cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h", "repo_id": "cutlass", "token_count": 11926 }
22
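The distinctive part of the B-operand iterator above is the predicate in load_with_pointer_offset: because a depthwise convolution only multiplies channel c of the activations with channel c of the filter, a thread loads its filter fragment from shared memory only when the k-group currently being processed matches the k index that thread owns, and zero-fills the fragment otherwise to avoid unnecessary SMEM traffic. The standalone snippet below restates that predicate in plain C++ purely as an illustration of the control flow; it is not the CUTLASS iterator itself, and the shapes are invented.

// Illustration of the depthwise "load only on the matching channel" predicate (not CUTLASS code).
#include <array>

constexpr int kWarpColumns   = 4;   // stands in for Policy::WarpShape::kColumn (assumed)
constexpr int kIterationsRow = 2;   // stands in for Iterations::kRow (assumed)
constexpr int kIterationsCol = 2;   // stands in for Iterations::kColumn (assumed)

void load_filter_fragment(std::array<float, kIterationsRow * kIterationsCol>& frag,
                          const float* smem,          // filter tile staged in shared memory
                          int channel_idx,            // current k-group being processed
                          int thread_k_base_idx) {    // first k index owned by this thread
  for (int k = 0; k < kIterationsRow; ++k) {
    for (int n = 0; n < kIterationsCol; ++n) {
      int owned_k = thread_k_base_idx + n * kWarpColumns;
      if (channel_idx + k == owned_k) {
        // Only the matching channel contributes to a depthwise product: perform the load.
        frag[n + k * kIterationsCol] = smem[owned_k];
      } else {
        // Any other channel would be multiplied by an implicit zero: skip the SMEM read.
        frag[n + k * kIterationsCol] = 0.0f;
      }
    }
  }
}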
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing elementwise operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/epilogue/collective/detail.hpp" #include "cute/tensor.hpp" #include "cute/numeric/numeric_types.hpp" #include "cutlass/trace.h" #include "cutlass/cuda_host_adapter.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace collective { ///////////////////////////////////////////////////////////////////////////////////////////////// // Applies an element wise operation to all elements within the fragment // and writes them out to destination storage. 
template < class StrideC_, class StrideD_, class ThreadEpilogueOp_, class EpilogueSchedule_ > class DefaultEpilogueArray { public: // // Type Aliases // using EpilogueSchedule = EpilogueSchedule_; // derived types of output thread level operator using ThreadEpilogueOp = ThreadEpilogueOp_; using ElementOutput = typename ThreadEpilogueOp::ElementOutput; using ElementAccumulator = typename ThreadEpilogueOp::ElementAccumulator; using ElementCompute = typename ThreadEpilogueOp::ElementCompute; using ElementScalar = ElementCompute; using ElementC = typename ThreadEpilogueOp::ElementC; using StrideC = StrideC_; using UnderlyingStrideC = cute::remove_pointer_t<StrideC>; using ElementD = typename ThreadEpilogueOp::ElementD; using StrideD = StrideD_; using UnderlyingStrideD = cute::remove_pointer_t<StrideD>; using GmemTiledCopyC = void; using GmemTiledCopyD = void; static const int kOutputAlignment = ThreadEpilogueOp::kCount; using AlignmentType = typename cute::uint_bit<sizeof_bits<ElementOutput>::value * kOutputAlignment>::type; static_assert(cute::is_same_v<EpilogueSchedule, PtrArrayNoSmemWarpSpecialized>, "Incompatible epilogue schedule."); static_assert(rank(UnderlyingStrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]"); static_assert(rank(UnderlyingStrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]"); struct SharedStorage { }; // Host side epilogue arguments struct Arguments { typename ThreadEpilogueOp::Params thread{}; ElementC const** ptr_C = nullptr; StrideC dC{}; ElementD** ptr_D = nullptr; StrideD dD{}; }; // Device side epilogue params using Params = Arguments; // // Methods // template <class ProblemShape> static constexpr Params to_underlying_arguments( ProblemShape const&, Arguments const& args, [[maybe_unused]] void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { return cutlass::Status::kSuccess; } template<class ProblemShape> CUTLASS_HOST_DEVICE static bool can_implement( [[maybe_unused]] ProblemShape const& problem_shape, [[maybe_unused]] Arguments const& args) { return true; } CUTLASS_HOST_DEVICE DefaultEpilogueArray(Params const& params_) : params(params_) { } CUTLASS_DEVICE bool is_source_needed() { // For Ptr-Array or Grouped Gemm we cannot determine if source is needed based on first beta. 
return true; } template< class ProblemShapeMNKL, class BlockShapeMNK, class BlockCoordMNKL, class FrgEngine, class FrgLayout, class TiledMma, class ResidueMNK > CUTLASS_HOST_DEVICE void operator()( ProblemShapeMNKL problem_shape_mnkl, BlockShapeMNK blk_shape_MNK, BlockCoordMNKL blk_coord_mnkl, cute::Tensor<FrgEngine, FrgLayout> const& accumulators, TiledMma tiled_mma, ResidueMNK residue_mnk, int thread_idx, [[maybe_unused]] char* smem_buf) { using namespace cute; using X = Underscore; static_assert(rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4"); static_assert(is_static<BlockShapeMNK>::value, "ThreadBlock tile shape must be static"); static_assert(rank(BlockShapeMNK{}) == 3, "BlockShapeMNK must be rank 3"); static_assert(rank(BlockCoordMNKL{}) == 4, "BlockCoordMNKL must be rank 3"); // Separate out problem shape for convenience auto M = get<0>(problem_shape_mnkl); auto N = get<1>(problem_shape_mnkl); auto L = get<3>(problem_shape_mnkl); // Batches are managed by using appropriate pointers to C and D matrices const int32_t mock_L = 1; const int32_t mock_l_coord = 0; // Slice to get the tile this CTA is responsible for auto [m_coord, n_coord, k_coord, l_coord] = blk_coord_mnkl; // If scalar alpha/beta are provided, i.e., same alpha/beta applies to all batches/groups. // If pointers to alpha/beta are provided, i.e., alpha/beta can differ between batches/groups, // we get the correct alpha/beta values for the current batch/group using group index. ThreadEpilogueOp epilogue_op = ThreadEpilogueOp(params.thread, l_coord); if (epilogue_op.is_source_needed() && params.dC == nullptr) { // Beta value is non-zero while pointer to C is a nullptr assert(0); } UnderlyingStrideC stride_c; UnderlyingStrideD stride_d; if constexpr (!cute::is_same_v<UnderlyingStrideC, StrideC>) { // If grouped gemm if (epilogue_op.is_source_needed()) { stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC[l_coord]); } stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD[l_coord]); } else { stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC); stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD); } // Represent the full output tensor ElementC const* ptr_C_l = nullptr; if (epilogue_op.is_source_needed()) { ptr_C_l = params.ptr_C[l_coord]; } Tensor mC_mnl = make_tensor(make_gmem_ptr(ptr_C_l), make_shape(M,N,mock_L), stride_c); // (m,n,l) Tensor mD_mnl = make_tensor(make_gmem_ptr(params.ptr_D[l_coord]), make_shape(M,N,mock_L), stride_d); // (m,n,l) Tensor gC_mnl = local_tile(mC_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l) Tensor gD_mnl = local_tile(mD_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l) Tensor gC = gC_mnl(_,_,m_coord,n_coord, mock_l_coord); // (BLK_M,BLK_N) Tensor gD = gD_mnl(_,_,m_coord,n_coord, mock_l_coord); // (BLK_M,BLK_N) // Partition source and destination tiles to match the accumulator partitioning auto thr_mma = tiled_mma.get_thread_slice(thread_idx); Tensor tCgD = thr_mma.partition_C(gD); // (VEC,THR_M,THR_N) Tensor tCgC = thr_mma.partition_C(gC); // (VEC,THR_M,THR_N) static_assert(is_static<FrgLayout>::value, "Accumulator layout must be static"); CUTE_STATIC_ASSERT_V(size(tCgC) == size(tCgD), "Source and destination must have the same number of elements."); CUTE_STATIC_ASSERT_V(size(tCgD) == size(accumulators), "Accumulator count must have the same destination element count."); // Make an identity coordinate tensor for predicating our output MN tile auto cD = 
make_identity_tensor(make_shape(unwrap(shape<0>(gD)), unwrap(shape<1>(gD)))); Tensor tCcD = thr_mma.partition_C(cD); // source is needed if (epilogue_op.is_source_needed()) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(accumulators); ++i) { if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) { tCgD(i) = epilogue_op(accumulators(i), tCgC(i)); } } } // source is not needed, avoid load else { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(accumulators); ++i) { if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) { tCgD(i) = epilogue_op(accumulators(i)); } } } } private: Params params; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace collective } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/collective/default_epilogue_array.hpp/0
{ "file_path": "cutlass/include/cutlass/epilogue/collective/default_epilogue_array.hpp", "repo_id": "cutlass", "token_count": 3907 }
23
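For the Ptr-Array / grouped path, the host side only needs to populate the Arguments struct above: a ThreadEpilogueOp::Params plus arrays of per-group C/D pointers and the corresponding strides. A hedged sketch follows, assuming the thread-level operator is the usual LinearCombination, that a single stride is shared by all groups, and that the schedule tag lives in the cutlass::epilogue namespace; none of these choices are mandated by this header.

#include "cutlass/epilogue/collective/default_epilogue_array.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"

// Assumed element, thread-op, and stride types for the sketch.
using ElementC   = cutlass::half_t;
using ElementD   = cutlass::half_t;
using ElementAcc = float;
using ThreadOp   = cutlass::epilogue::thread::LinearCombination<ElementD, 1, ElementAcc, ElementAcc>;
using StrideC    = cute::Stride<int64_t, cute::Int<1>, int64_t>;  // rank-3 [M, N, L], assumed
using StrideD    = StrideC;

using EpilogueArray = cutlass::epilogue::collective::DefaultEpilogueArray<
    StrideC, StrideD, ThreadOp,
    cutlass::epilogue::PtrArrayNoSmemWarpSpecialized>;  // schedule tag required by the static_assert above (namespace assumed)

EpilogueArray::Arguments make_epilogue_args(ElementC const** ptr_C_batches,  // one pointer per group
                                            ElementD**       ptr_D_batches,
                                            StrideC dC, StrideD dD,
                                            float alpha, float beta) {
  EpilogueArray::Arguments args;
  args.thread = {alpha, beta};   // ThreadEpilogueOp::Params: scalar alpha/beta applied to all groups
  args.ptr_C  = ptr_C_batches;
  args.dC     = dC;
  args.ptr_D  = ptr_D_batches;
  args.dD     = dD;
  return args;
}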
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue functor specialized for residual blocks in deep neural networks. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/detail.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { /// Models a residual block of the form: UnaryOp(BinaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual1), residual2)) template <typename ElementOutput_, typename ElementAccumulator_, typename ElementCompute_, typename ElementC_, int ElementsPerAccess, template <typename T> class ActivationOp_, template <typename T> class BinaryOp1_, template <typename T> class UnaryOp_, template <typename T> class BinaryOp2_ = detail::NoOp, bool StoreT_ = false, typename ElementVector_ = ElementC_> class LinearCombinationResidualBlock { public: static bool const kIsSingleSource = false; using ElementOutput = ElementC_; using ElementC = ElementC_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; using ElementVector = ElementVector_; static int const kElementsPerAccess = ElementsPerAccess; static int const kCount = kElementsPerAccess; using UnaryOp = UnaryOp_<Array<ElementCompute, kCount>>; using BinaryOp1 = BinaryOp1_<Array<ElementCompute, kCount>>; using BinaryOp2 = BinaryOp2_<Array<ElementCompute, kCount>>; using ActivationOp = ActivationOp_<Array<ElementCompute, kCount>>; using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>; using FragmentCompute = Array<ElementCompute, kElementsPerAccess>; using FragmentC = Array<ElementC, kElementsPerAccess>; using FragmentOutput = Array<ElementOutput, kElementsPerAccess>; using ElementZ = ElementOutput_; using ElementT = ElementZ; using FragmentZ = Array<ElementZ, kElementsPerAccess>; using FragmentT = Array<ElementT, kElementsPerAccess>; static bool const kIsHeavy = true; static bool const kStoreZ = true; static bool const kStoreT = StoreT_; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales residual input ElementCompute const *alpha_ptr{nullptr}; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr{nullptr}; ///< pointer to residual scalar - if not null, loads it from memory CUTLASS_HOST_DEVICE Params() : alpha(ElementCompute(1)), beta(ElementCompute(1)) {} CUTLASS_HOST_DEVICE Params(ElementCompute alpha, ElementCompute beta) : alpha(alpha), beta(beta) {} CUTLASS_HOST_DEVICE Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr) : alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {} }; private: ElementCompute alpha_; ElementCompute beta_; bool skip_elementwise_; public: /// Constructor from Params CUTLASS_HOST_DEVICE LinearCombinationResidualBlock(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta); skip_elementwise_ = false; } /// The "source" tensor corresponds to the residual input CUTLASS_HOST_DEVICE bool is_source_needed() const { return true; } /// Functionally required for serial reduction in the epilogue /// IMPORTANT: Split-k is supported only when ActivationOp is Identity. 
CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; } } /// Applies the operation UnaryOp(BinaryOp(BinaryOp(ActivationOp(AB + bias), residual1), residual2)) CUTLASS_HOST_DEVICE void operator()(FragmentOutput &frag_Z, FragmentOutput &, FragmentAccumulator const &AB, FragmentC const &residual1, FragmentC const &residual2, FragmentCompute const &bias) const { UnaryOp unary_op; BinaryOp1 binary_op1; BinaryOp2 binary_op2; ActivationOp activation; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute tmp_residual1 = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual1); FragmentCompute tmp_residual2 = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual2); FragmentCompute z = binary_op2(binary_op1(activation(alpha_ * tmp_Accum + bias), beta_ * tmp_residual1), beta_ * tmp_residual2); FragmentCompute result_Z = skip_elementwise_ ? z : unary_op(z); NumericArrayConverter<ElementOutput, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); } /// Should never be called CUTLASS_HOST_DEVICE void operator()(FragmentOutput &, FragmentOutput &, FragmentAccumulator const &, FragmentCompute const &) const {} }; /// Models a residual block of the form: UnaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual)) template <typename ElementOutput_, typename ElementAccumulator_, typename ElementCompute_, typename ElementC_, int ElementsPerAccess, template <typename T> class ActivationOp_, template <typename T> class BinaryOp1_, template <typename T> class UnaryOp_, bool StoreT_, typename ElementVector_> class LinearCombinationResidualBlock<ElementOutput_, ElementAccumulator_, ElementCompute_, ElementC_, ElementsPerAccess, ActivationOp_, BinaryOp1_, UnaryOp_, detail::NoOp, StoreT_, ElementVector_> { public: static bool const kIsSingleSource = true; using ElementOutput = ElementC_; using ElementC = ElementC_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; using ElementVector = ElementVector_; static int const kElementsPerAccess = ElementsPerAccess; static int const kCount = kElementsPerAccess; using UnaryOp = UnaryOp_<Array<ElementCompute, kCount>>; using BinaryOp = BinaryOp1_<Array<ElementCompute, kCount>>; using ActivationOp = ActivationOp_<Array<ElementCompute, kCount>>; using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>; using FragmentCompute = Array<ElementCompute, kElementsPerAccess>; using FragmentC = Array<ElementC, kElementsPerAccess>; using FragmentOutput = Array<ElementOutput, kElementsPerAccess>; using ElementZ = ElementOutput_; using ElementT = ElementZ; using FragmentZ = Array<ElementZ, kElementsPerAccess>; using FragmentT = Array<ElementT, kElementsPerAccess>; static bool const kIsHeavy = true; static bool const kStoreZ = true; static bool const kStoreT = StoreT_; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales residual input ElementCompute const *alpha_ptr{nullptr}; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr{nullptr}; ///< pointer to residual scalar - if not null, loads it from memory CUTLASS_HOST_DEVICE Params() : alpha(ElementCompute(1)), beta(ElementCompute(1)) {} CUTLASS_HOST_DEVICE 
Params(ElementCompute alpha, ElementCompute beta) : alpha(alpha), beta(beta) {} CUTLASS_HOST_DEVICE Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr) : alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {} }; private: ElementCompute alpha_; ElementCompute beta_; bool skip_elementwise_; public: /// Constructor from Params CUTLASS_HOST_DEVICE LinearCombinationResidualBlock(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta); skip_elementwise_ = false; } /// The "source" tensor corresponds to the residual input CUTLASS_HOST_DEVICE bool is_source_needed() const { return true; } /// Functionally required for serial reduction in the epilogue /// IMPORTANT: Split-k is supported only when ActivationOp is Identity. CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; } } /// Applies the operation UnaryOp(BinaryOp(ActivationOp(AB + bias), residual)) CUTLASS_HOST_DEVICE void operator()(FragmentOutput &frag_Z, FragmentOutput &, FragmentAccumulator const &AB, FragmentC const &residual, FragmentCompute const &bias) const { UnaryOp unary_op; BinaryOp binary_op; ActivationOp activation; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute tmp_residual = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual); FragmentCompute z = binary_op(activation(alpha_ * tmp_Accum + bias), beta_ * tmp_residual); FragmentCompute result_Z = skip_elementwise_ ? z : unary_op(z); NumericArrayConverter<ElementOutput, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); } /// Should never be called CUTLASS_HOST_DEVICE void operator()(FragmentOutput &, FragmentOutput &, FragmentAccumulator const &, FragmentCompute const &) const {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/thread/linear_combination_residual_block.h
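As a usage illustration for the functor defined above: the snippet below is a minimal sketch, not library code, showing how the single-source specialization might be instantiated as the output op of a broadcast-style epilogue. The element types, the access width of 8, and the choice of ReLu/plus/Identity are assumptions picked for illustration. Because BinaryOp2 is left at its detail::NoOp default, the partial specialization with kIsSingleSource == true is selected, so the functor computes D = Identity(plus(ReLu(alpha * AB + bias), beta * residual)). Compile as a .cu translation unit with nvcc so the CUTLASS function qualifiers resolve as intended.

```cpp
// Sketch only: illustrative instantiation of LinearCombinationResidualBlock.
// Types, access width, and op choices are assumptions, not prescribed values.
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"                  // cutlass::plus
#include "cutlass/epilogue/thread/activation.h"  // ReLu, Identity
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"

using ResidualOutputOp = cutlass::epilogue::thread::LinearCombinationResidualBlock<
    cutlass::half_t,                      // ElementOutput (element type of Z)
    float,                                // ElementAccumulator
    float,                                // ElementCompute
    cutlass::half_t,                      // ElementC (residual / source element)
    8,                                    // ElementsPerAccess (8 x half_t = 128-bit accesses)
    cutlass::epilogue::thread::ReLu,      // ActivationOp applied to alpha * AB + bias
    cutlass::plus,                        // BinaryOp1 combining with beta * residual
    cutlass::epilogue::thread::Identity>; // UnaryOp applied last (a no-op here)

int main() {
  // Host-constructable parameters: alpha scales the accumulator, beta the residual.
  ResidualOutputOp::Params params(/* alpha = */ 1.0f, /* beta = */ 1.0f);
  ResidualOutputOp op(params);

  // With BinaryOp2 == detail::NoOp, the single-source specialization is chosen,
  // and the residual is read through the epilogue's source iterator.
  static_assert(ResidualOutputOp::kIsSingleSource,
                "NoOp BinaryOp2 selects the single-source specialization");
  return op.is_source_needed() ? 0 : 1;
}
```

In a real kernel this type is not used directly; it is passed as the OutputOp of an epilogue assembled by one of the default helpers, as sketched after the next file.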
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h" #include "cutlass/epilogue/threadblock/epilogue_streamk_with_broadcast.h" #include "cutlass/layout/permute.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template < typename Shape, typename WarpMmaSimt, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueWithBroadcastSimt { /// Use defaults related to the existing epilogue using Base = DefaultEpilogueSimt< Shape, WarpMmaSimt, OutputOp, ElementsPerAccess >; // // Stores the result z = (y = GEMM(A, B, C), broadcast) // using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; // // Additional tensor tile iterator - stores t = Elementwise(z) // using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementTensor >; /// Define the epilogue using Epilogue = EpilogueWithBroadcast< Shape, WarpMmaSimt, Base::kPartitionsK, OutputTileIterator, TensorTileIterator, ElementVector, typename Base::AccumulatorFragmentIterator, typename Base::WarpTileIterator, typename Base::SharedLoadIterator, OutputOp, typename Base::Padding >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for strided dgrad epilogues for SimtOps. template < typename Shape, typename WarpMmaSimt, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueWithBroadcastSimtStridedDgrad { /// Use defaults related to the existing epilogue using Base = DefaultEpilogueSimtStridedDgrad< Shape, WarpMmaSimt, OutputOp, ElementsPerAccess >; // // Stores the result z = (y = GEMM(A, B, C), broadcast) // using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad< typename Base::OutputTileThreadMap, ElementOutput >; // // Additional tensor tile iterator - stores t = Elementwise(z) // using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad< typename Base::OutputTileThreadMap, ElementTensor >; /// Define the epilogue using Epilogue = EpilogueWithBroadcast< Shape, WarpMmaSimt, Base::kPartitionsK, OutputTileIterator, TensorTileIterator, ElementVector, typename Base::AccumulatorFragmentIterator, typename Base::WarpTileIterator, typename Base::SharedLoadIterator, OutputOp, typename Base::Padding >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueWithBroadcastTensorOp { /// Use defaults related to the existing epilogue using Base = DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess >; // // Stores the result z = (y = GEMM(A, B, C), broadcast) // using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; // // Additional tensor tile iterator - stores t = Elementwise(z) // using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementTensor >; /// Define the epilogue using Epilogue = EpilogueWithBroadcast< Shape, WarpMmaTensorOp, PartitionsK, OutputTileIterator, TensorTileIterator, ElementVector, typename Base::AccumulatorFragmentIterator, typename Base::WarpTileIterator, typename Base::SharedLoadIterator, OutputOp, typename Base::Padding, Base::kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for streamk epilogues for TensorOps. template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultStreamkEpilogueWithBroadcastTensorOp { /// Use defaults related to the existing epilogue using Base = DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess >; // // Stores the result z = (y = GEMM(A, B, C), broadcast) // using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; // // Additional tensor tile iterator - stores t = Elementwise(z) // using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementTensor >; /// Define the epilogue using Epilogue = EpilogueStreamkWithBroadcast< Shape, WarpMmaTensorOp, PartitionsK, OutputTileIterator, TensorTileIterator, ElementVector, typename Base::AccumulatorFragmentIterator, typename Base::WarpTileIterator, typename Base::SharedLoadIterator, OutputOp, typename Base::Padding, Base::kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for VoltaTensorOps. 
template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess > struct DefaultEpilogueWithBroadcastVoltaTensorOp { /// Use defaults related to the existing epilogue using Base = DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess >; // // Stores the result z = (y = GEMM(A, B, C), broadcast) // using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementOutput >; // // Additional tensor tile iterator - stores t = Elementwise(z) // using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementTensor >; /// Define the epilogue using Epilogue = EpilogueWithBroadcast< Shape, WarpMmaTensorOp, PartitionsK, OutputTileIterator, TensorTileIterator, ElementVector, typename Base::AccumulatorFragmentIterator, typename Base::WarpTileIterator, typename Base::SharedLoadIterator, OutputOp, typename Base::Padding >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h
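To make the role of these default structs concrete, the sketch below (illustrative, not library code) assembles a plausible TensorOp configuration and extracts the epilogue type that the helper glues together. The threadblock and warp shapes, instruction shape, layouts, and the reuse of the residual-block functor from the previous sketch as the broadcast output op are all assumptions; a real kernel obtains these pieces from its GEMM or convolution default configuration. Compile as a .cu file with nvcc; shapes and the access width must be mutually consistent for the thread maps to instantiate.

```cpp
// Sketch only: assembling an EpilogueWithBroadcast through the default helper.
// Shapes, layouts, and the output op are illustrative assumptions.
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"

// Warp-level Tensor Core MMA (SM80-style: f16 inputs, f32 accumulation).
using WarpMma = cutlass::gemm::warp::DefaultMmaTensorOp<
    cutlass::gemm::GemmShape<64, 64, 32>,   // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,    // mma instruction shape
    cutlass::half_t, cutlass::layout::RowMajor,
    cutlass::half_t, cutlass::layout::ColumnMajor,
    float, cutlass::layout::RowMajor>::Type;

// Broadcast-style output op (same functor as in the previous sketch).
using OutputOp = cutlass::epilogue::thread::LinearCombinationResidualBlock<
    cutlass::half_t, float, float, cutlass::half_t, 8,
    cutlass::epilogue::thread::ReLu, cutlass::plus,
    cutlass::epilogue::thread::Identity>;

// The default helper composes thread maps, tile iterators, and the epilogue.
using DefaultEpilogue = cutlass::epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp<
    cutlass::gemm::GemmShape<128, 128, 32>,  // threadblock tile
    WarpMma,
    1,                                       // PartitionsK
    cutlass::half_t,                         // ElementOutput (z)
    cutlass::half_t,                         // ElementTensor (t)
    cutlass::half_t,                         // ElementVector (broadcast vector)
    OutputOp,
    8>;                                      // ElementsPerAccess

// The assembled threadblock-scoped epilogue, as a kernel would consume it.
using Epilogue = DefaultEpilogue::Epilogue;

int main() { return 0; }  // Type-level sketch only; a kernel constructs Epilogue
                          // with its SharedStorage inside a __global__ function.
```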
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #include <cuda/std/utility> #else #include <assert.h> #include <utility> #endif #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/fast_math.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/epilogue_base_streamk.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/numeric_types.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// This base class is meant to define the concept required of the /// EpilogueStreamkWithBroadcast::OutputOp template < typename ElementC_, typename ElementAccumulator_, typename ElementCompute_, typename ElementZ_, typename ElementT_, int ElementsPerAccess, bool StoreZ = true, bool StoreT = true > struct EpilogueStreamkWithBroadcastOpBase : EpilogueWithBroadcastOpBase< ElementC_, ElementAccumulator_, ElementCompute_, ElementZ_, ElementT_, ElementsPerAccess, StoreZ, StoreT > { /// Parameters structure - required struct Params { }; // // Methods // /// Constructor from Params EpilogueStreamkWithBroadcastOpBase(Params const &params_) { } }; //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator with bias vector broadcast over columns. 
/// /// Computes the following: /// /// /// Z, T = OutputOp(AB, C, Broadcast) /// /// if (ElementwiseOp::kStoreZ) { /// store(converted_u); /// } /// /// if (ElementwiseOp::kStoreT) { /// store(v); /// } /// template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors (z) typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands (t) typename ElementVector_, ///< Pointer to broadcast vector typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator - concept is EpilogueWithBroadcastOp typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large (!IsEpilogueFunctorHeavy<OutputOp_>::value), bool IsSingleSource = OutputOp_::kIsSingleSource > class EpilogueStreamkWithBroadcast; ///////////////////////////////////////////////////////////////////////////////////////////////// /// EpilogueStreamkWithBroadcast: Two sources template < typename Shape_, typename WarpMmaOperator_, int PartitionsK, typename OutputTileIterator_, typename TensorTileIterator_, typename ElementVector_, typename AccumulatorFragmentIterator_, typename WarpTileIterator_, typename SharedLoadIterator_, typename OutputOp_, typename Padding_, int FragmentsPerPartition, int IterationsUnroll > class EpilogueStreamkWithBroadcast< Shape_, WarpMmaOperator_, PartitionsK, OutputTileIterator_, TensorTileIterator_, ElementVector_, AccumulatorFragmentIterator_, WarpTileIterator_, SharedLoadIterator_, OutputOp_, Padding_, FragmentsPerPartition, IterationsUnroll, false > : public EpilogueWithBroadcast< Shape_, WarpMmaOperator_, PartitionsK, OutputTileIterator_, TensorTileIterator_, ElementVector_, AccumulatorFragmentIterator_, WarpTileIterator_, SharedLoadIterator_, OutputOp_, Padding_, FragmentsPerPartition, IterationsUnroll, false>, public EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_> { public: using Base = EpilogueWithBroadcast< Shape_, WarpMmaOperator_, PartitionsK, OutputTileIterator_, TensorTileIterator_, ElementVector_, AccumulatorFragmentIterator_, WarpTileIterator_, SharedLoadIterator_, OutputOp_, Padding_, FragmentsPerPartition, IterationsUnroll, false>; using BaseStreamK = EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_>; using Shape = Shape_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using TensorTileIterator = TensorTileIterator_; using ElementVector = ElementVector_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; /// Fragment type used by the accumulator tile's fragment iterator using AccumulatorFragment = typename Base::AccumulatorFragmentIterator::Fragment; /// Shared storage structure (shadows base) with additional SMEM buffer for reduction using SharedStorage = typename Base::SharedStorage; public: /// Constructor CUTLASS_DEVICE 
EpilogueStreamkWithBroadcast( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): Base(shared_storage, thread_idx, warp_idx, lane_idx), BaseStreamK(thread_idx) { } /// Aggregates the accumulator sets shared by peer blocks in the global workspace, /// performing epilogue computations, writing to output CUTLASS_DEVICE void reduce( int peer_idx_begin, int peer_idx_end, int reduce_fragment_idx, void *element_workspace, OutputOp const &output_op, ///< Output operator ElementVector const * broadcast_ptr, ///< Broadcast vector OutputTileIterator destination_iterator, ///< Tile iterator for destination OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord(Shape::kM, Shape::kN), MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space MatrixCoord()) { // Reduce peer accumulator fragments into one fragment AccumulatorFragment accum_fragment; BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace); // Store fragment to shared memory this->warp_tile_iterator_.store(accum_fragment); __syncthreads(); Base::reduce(reduce_fragment_idx, output_op, broadcast_ptr, destination_iterator, source_iterator1, source_iterator2, tensor_iterator, problem_size, threadblock_offset); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// EpilogueStreamkWithBroadcast: Single source template < typename Shape_, typename WarpMmaOperator_, int PartitionsK, typename OutputTileIterator_, typename TensorTileIterator_, typename ElementVector_, typename AccumulatorFragmentIterator_, typename WarpTileIterator_, typename SharedLoadIterator_, typename OutputOp_, typename Padding_, int FragmentsPerPartition, int IterationsUnroll > class EpilogueStreamkWithBroadcast< Shape_, WarpMmaOperator_, PartitionsK, OutputTileIterator_, TensorTileIterator_, ElementVector_, AccumulatorFragmentIterator_, WarpTileIterator_, SharedLoadIterator_, OutputOp_, Padding_, FragmentsPerPartition, IterationsUnroll, true > : public EpilogueWithBroadcast< Shape_, WarpMmaOperator_, PartitionsK, OutputTileIterator_, TensorTileIterator_, ElementVector_, AccumulatorFragmentIterator_, WarpTileIterator_, SharedLoadIterator_, OutputOp_, Padding_, FragmentsPerPartition, IterationsUnroll, true>, public EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_> { public: using Base = EpilogueWithBroadcast< Shape_, WarpMmaOperator_, PartitionsK, OutputTileIterator_, TensorTileIterator_, ElementVector_, AccumulatorFragmentIterator_, WarpTileIterator_, SharedLoadIterator_, OutputOp_, Padding_, FragmentsPerPartition, IterationsUnroll, true>; using BaseStreamK = EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_>; using Shape = Shape_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using TensorTileIterator = TensorTileIterator_; using ElementVector = ElementVector_; using SharedLoadIterator = SharedLoadIterator_; using 
OutputOp = OutputOp_; /// Fragment type used by the accumulator tile's fragment iterator using AccumulatorFragment = typename Base::AccumulatorFragmentIterator::Fragment; /// Shared storage structure (shadows base) with additional SMEM buffer for reduction using SharedStorage = typename Base::SharedStorage; public: /// Constructor CUTLASS_DEVICE EpilogueStreamkWithBroadcast( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): Base(shared_storage, thread_idx, warp_idx, lane_idx), BaseStreamK(thread_idx) { } /// Aggregates the accumulator sets shared by peer blocks in the global workspace, /// performing epilogue computations, writing to output CUTLASS_DEVICE void reduce( int peer_idx_begin, int peer_idx_end, int reduce_fragment_idx, void *element_workspace, OutputOp const &output_op, ///< Output operator ElementVector const * broadcast_ptr, ///< Broadcast vector OutputTileIterator destination_iterator, ///< Tile iterator for destination OutputTileIterator source_iterator, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord(Shape::kM, Shape::kN), MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space MatrixCoord()) { // Reduce peer accumulator fragments into one fragment AccumulatorFragment accum_fragment; BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace); // Store fragment to shared memory this->warp_tile_iterator_.store(accum_fragment); __syncthreads(); Base::reduce(reduce_fragment_idx, output_op, broadcast_ptr, destination_iterator, source_iterator, tensor_iterator, problem_size, threadblock_offset); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/epilogue_streamk_with_broadcast.h
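The reduce() methods above are where the stream-K variant differs from the plain broadcast epilogue: each participating peer threadblock has produced a partial accumulator covering only its slice of the K dimension, and one block aggregates those partials from the global workspace before running the usual output op. The standalone sketch below is purely conceptual and uses no CUTLASS types: it mimics, on scalars, what BaseStreamK::reduce followed by a broadcast output op of the same shape as the residual-block functor shown earlier would compute for a single output element. The two-way split point, alpha/beta values, bias, and the ReLU choice are assumptions for illustration.

```cpp
// Conceptual sketch of stream-K peer reduction for one output element.
// Not CUTLASS code; names and the 2-way K split are illustrative assumptions.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  int const K = 8;
  std::vector<float> a(K), b(K);
  for (int k = 0; k < K; ++k) { a[k] = 0.5f * k; b[k] = 1.0f - 0.25f * k; }

  // Two "peer" threadblocks each accumulate a disjoint K-range into a workspace.
  auto partial = [&](int k_begin, int k_end) {
    float acc = 0.f;
    for (int k = k_begin; k < k_end; ++k) acc += a[k] * b[k];
    return acc;
  };
  float workspace[2] = { partial(0, 5), partial(5, K) };  // uneven split, as in stream-K

  // The reducing block sums the peer partials (the job of BaseStreamK::reduce) ...
  float accumulator = workspace[0] + workspace[1];

  // ... then applies the output op: Z = ReLU(alpha * AB + bias) + beta * residual.
  float alpha = 1.0f, beta = 1.0f, bias = -1.0f, residual = 0.25f;
  float z = std::max(alpha * accumulator + bias, 0.0f) + beta * residual;

  std::printf("accumulator = %f, z = %f\n", accumulator, z);
  return 0;
}
```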
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/permute.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type bool ScatterD = false, ///< Scatter D operand or not typename PermuteDLayout = layout::NoPermute, ///< Permute D operand or not bool UseCUDAStore = false > class PredicatedTileIterator { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Count::kTile; static bool constexpr PermuteD = !layout::is_trivial_permute<PermuteDLayout>; static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0"); static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0"); static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0"); static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0"); /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; // // Parameters struct // /// Uses a non-template class struct Params : PredicatedTileIteratorParams { using Base = PredicatedTileIteratorParams; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout): PredicatedTileIteratorParams( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, make_OutputTileThreadMapDesc<ThreadMap>() ) { } CUTLASS_HOST_DEVICE Params(Layout const &layout, // Not needed. Added to be compatible with strided conv epilogue. conv::Conv2dProblemSize const &problem_size): Params(layout) { } CUTLASS_HOST_DEVICE Params(Layout const &layout, // Not needed. Added to be compatible with strided conv epilogue. conv::Conv3dProblemSize const &problem_size): Params(layout) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kColumn; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. PredicatedTileIteratorParams params_; /// Byte-level pointer. This pointer is usually for both load() and store(), unless PermuteD is performed. When having PermuteD, byte_pointer_ is only for load(). uint8_t *byte_pointer_; /// Byte-level pointer for store(). Due to PermuteD Op, store_byte_pointer_ may be with different address computation compared to byte_pointer_. 
uint8_t *store_byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// Extent of the matrix tile in rows Index extent_column_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// A thread's starting column Index thread_start_column_; /// Internal state counter int state_[3]; /// Scatter indices int const *indices_; /// PermuteDLayout PermuteDLayout permute_layout_; // // Static asserts about internal strides // static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIterator( PredicatedTileIteratorParams const & params, Element *pointer, TensorCoord extent, int thread_idx, TensorCoord threadblock_offset = TensorCoord(), int const *indices = nullptr ): params_(params), indices_(indices), permute_layout_(PitchLinearCoord(extent.column(), extent.row()), params_.stride * kElementsPerAccess / sizeof(AccessType)) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; extent_row_ = extent.row(); extent_column_ = extent.column(); thread_start_row_ = thread_offset.row(); thread_start_column_ = thread_offset.column(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { mask_.predicates[c] = ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column()); } // Null pointer performs no accesses if (!pointer) { mask_.clear(); } if (ScatterD && !indices) { mask_.clear(); } // Initialize byte_pointer_ byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + LongIndex(thread_offset.row()) * LongIndex(params_.stride) + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; if (ScatterD) { byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; } // store_byte_pointer_ is set to be the same with byte_pointer_ unless PermuteD is used. store_byte_pointer_ = PermuteD ? 
reinterpret_cast<uint8_t *>(pointer) : byte_pointer_; // Initialize internal state counter state_[0] = state_[1] = state_[2] = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { store_byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); if (ScatterD && row_guard) { assert(indices_); memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset + LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride)); } CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { if (!ScatterD) { byte_pointer += params_.increment_row; } } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const { uint8_t *byte_pointer = store_byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); if (ScatterD && row_guard) { assert(indices_); memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset + LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride)); } CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; if (PermuteD) { int 
col_offset = column * ThreadMap::Delta::kColumn; int col = col_offset + thread_start_column_; int row = row_offset + thread_start_row_; // Locate memory_pointer memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset + permute_layout_(PitchLinearCoord(col, row)) * sizeof(AccessType) / kElementsPerAccess); } if (UseCUDAStore) { if (guard) { memory_pointer[0] = frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]; } } else { cutlass::arch::global_store<AccessType, sizeof(AccessType)>( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[0], guard); } if (!PermuteD) { memory_pointer += (ThreadMap::Delta::kColumn / kElementsPerAccess); } } if (row + 1 < ThreadMap::Iterations::kRow) { if (!ScatterD && !PermuteD) { byte_pointer += params_.increment_row; } } } if (group + 1 < ThreadMap::Iterations::kGroup) { if (!ScatterD && !PermuteD) { byte_pointer += params_.increment_group; } } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { if (!ScatterD && !PermuteD) { byte_pointer += params_.increment_cluster; } } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } /// Loads a fragment from memory CUTLASS_DEVICE void downsample_load_with_byte_offset(Fragment &frag, int64_t byte_offset, int convolution_P, int convolution_Q, int add_P, int add_Q, int problem_N) const { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); int output_row = row_offset + thread_start_row_; int output_N = output_row / (convolution_P * convolution_Q); int output_PQ = output_row % (convolution_P * convolution_Q); int output_P = output_PQ / convolution_Q; int output_Q = output_PQ % convolution_Q; int input_row = output_N * 2 * convolution_P * 2 * convolution_Q + (2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q + add_Q; int64_t byte_offset = (input_row-output_row)*problem_N*sizeof(float); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Loads a fragment from memory CUTLASS_DEVICE void upsample_load_with_byte_offset(Fragment &frag, int64_t byte_offset, int convolution_P, int convolution_Q, int add_P, int add_Q, int problem_N) const { uint8_t *byte_pointer = byte_pointer_; AccessType 
*frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); int output_row = row_offset + thread_start_row_; int output_N = output_row / (convolution_P * convolution_Q); int output_PQ = output_row % (convolution_P * convolution_Q); int output_P = output_PQ / convolution_Q; int output_Q = output_PQ % convolution_Q; int row_add_P = add_P; int row_add_Q = add_Q; if (output_P > convolution_P - 2) row_add_P = 0; if (output_Q > convolution_Q - 2) row_add_Q = 0; int input_row = output_N * (convolution_P/2) * (convolution_Q/2) + ((output_P + row_add_P)/2) * (convolution_Q/2) + (output_Q + row_add_Q)/2; int64_t byte_offset = (input_row-output_row)*problem_N*sizeof(float); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } CUTLASS_DEVICE MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_row() const { return thread_start_row_; } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_column() const { return thread_start_column_; } /// Extent of the matrix in rows CUTLASS_DEVICE Index extent_row() const { return extent_row_; } /// Extent of the matrix in columns CUTLASS_DEVICE Index extent_column() const { return extent_column_; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIterator &operator++() { ++state_[0]; if (!ScatterD) { byte_pointer_ += params_.advance_row; } if (!ScatterD && !PermuteD) { store_byte_pointer_ += params_.advance_row; } thread_start_row_ += ThreadMap::Shape::kRow; if (state_[0] == ThreadMap::Count::kRow) { state_[0] = 0; ++state_[1]; if (!ScatterD) { byte_pointer_ += params_.advance_group; } if (!ScatterD && !PermuteD) { store_byte_pointer_ += params_.advance_group; } thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; if (state_[1] == ThreadMap::Count::kGroup) { state_[1] = 0; ++state_[2]; if (!ScatterD) { byte_pointer_ += params_.advance_cluster; } if (!ScatterD && !PermuteD) { store_byte_pointer_ += params_.advance_cluster; } thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow; if (state_[2] == 
ThreadMap::Count::kCluster) { state_[2] = 0; if (!ScatterD) { byte_pointer_ += params_.advance_tile; } if (!ScatterD && !PermuteD) { store_byte_pointer_ += params_.advance_tile; } thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow * ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile; } } } return *this; } /// Advances a number of positions to load or store CUTLASS_HOST_DEVICE PredicatedTileIterator &operator+=(int increment) { // Row state_[0] += increment; int increment_row = state_[0] / ThreadMap::Count::kRow; state_[0] = state_[0] % ThreadMap::Count::kRow; byte_pointer_ += (params_.advance_row * increment); store_byte_pointer_ += (params_.advance_row * increment); thread_start_row_ += (ThreadMap::Shape::kRow * increment); // Group state_[1] += increment_row; int increment_group = state_[1] / ThreadMap::Count::kGroup; state_[1] = state_[1] % ThreadMap::Count::kGroup; byte_pointer_ += (params_.advance_group * increment_row); store_byte_pointer_ += (params_.advance_group * increment_row); thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow * increment_row; // Cluster state_[2] += increment_group; int increment_cluster = state_[2] / ThreadMap::Count::kCluster; state_[2] = state_[2] % ThreadMap::Count::kCluster; byte_pointer_ += (params_.advance_cluster * increment_group); store_byte_pointer_ += (params_.advance_cluster * increment_group); thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow * increment_group; // Tile byte_pointer_ += (params_.advance_tile * increment_cluster); store_byte_pointer_ += (params_.advance_tile * increment_cluster); thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow * ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile * increment_cluster; return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) const { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | InterleavedPredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type int InterleavedN ///< Number of Interleaved N > class InterleavedPredicatedTileIterator { public: using ThreadMap = ThreadMap_; using Element = Element_; using Layout = layout::ColumnMajorInterleaved<InterleavedN>; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = layout::PitchLinearCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Iterations::kCount; /// Fragment object using Fragment = Array<Element, ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; /// Uses a non-template class struct Params : InterleavedPredicatedTileIteratorParams { using Base = InterleavedPredicatedTileIteratorParams; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout): Base( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, make_InterleavedPredicatedTileIteratorDesc<Element, ThreadMap>() ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; /// Mask object struct Mask { static int const kCount = (ThreadMap::Iterations::kContiguous < 8) ? 8 : ThreadMap::Iterations::kContiguous; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
Params params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in columns Index extent_col_; /// A thread's starting column position (assuming steady-state predicates have /// been computed) Index thread_start_col_; /// Internal iteration counter int iteration_contiguous_; int iteration_strided_; private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE InterleavedPredicatedTileIterator( Params const & params, Element *pointer, TensorCoord extent, int thread_idx, TensorCoord threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ): params_(params) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + TensorCoord(threadblock_offset.contiguous() * InterleavedN, threadblock_offset.strided() / InterleavedN); extent_col_ = extent.strided() / InterleavedN; thread_start_col_ = thread_offset.strided(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { mask_.predicates[c] = ((thread_offset.contiguous() + ThreadMap::Delta::kContiguous * c) < (extent.contiguous() * InterleavedN)); } // Initialize pointer byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + LongIndex(thread_offset.strided()) * LongIndex(params_.stride) + LongIndex(thread_offset.contiguous()) * sizeof(AccessType) / kElementsPerAccess; // Initialize internal state counter iteration_contiguous_ = iteration_strided_ = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer); int col_offset = iteration_strided_ * ThreadMap::Delta::kStrided; bool col_guard = ((thread_start_col_ + col_offset) < extent_col_); bool guard = col_guard && mask_.predicates[iteration_contiguous_]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( *frag_ptr, (void *)memory_pointer, guard); } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { uint8_t *byte_pointer = byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer); int col_offset = iteration_strided_ * ThreadMap::Delta::kStrided; bool col_guard = ((thread_start_col_ + col_offset) < extent_col_); bool guard = col_guard && mask_.predicates[iteration_contiguous_]; cutlass::arch::global_store<AccessType, sizeof(AccessType)>( *frag_ptr, (void *)memory_pointer, guard); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int iteration) { iteration_contiguous_ = iteration % ThreadMap::Iterations::kContiguous; iteration_strided_ = iteration / ThreadMap::Iterations::kContiguous; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE InterleavedPredicatedTileIterator &operator++() { ++iteration_contiguous_; byte_pointer_ += params_.advance_row; if (iteration_contiguous_ == ThreadMap::Iterations::kContiguous) { iteration_contiguous_ = 0; ++iteration_strided_; byte_pointer_ += params_.advance_column; if (iteration_strided_ == ThreadMap::Iterations::kStrided) { 
iteration_strided_ = 0; } } return *this; } /// Advances a number of positions to load or store CUTLASS_HOST_DEVICE InterleavedPredicatedTileIterator &operator+=(int increment) { // Contiguous iteration_contiguous_ += increment; int increment_strided = iteration_contiguous_ / ThreadMap::Iterations::kContiguous; iteration_contiguous_ = iteration_contiguous_ % ThreadMap::Iterations::kContiguous; byte_pointer_ += (params_.advance_row * increment); // Strided iteration_strided_ += increment_strided; byte_pointer_ += (params_.advance_column * increment_strided); return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from global memory in epilogue. /// /// Satisfies: ReadableTileIterator | InterleavedMaskedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type int InterleavedN ///< Number of Interleaved N > class InterleavedConvPredicatedTileIterator { public: using ThreadMap = ThreadMap_; using Element = Element_; using Layout = layout::TensorNCxHWx<InterleavedN>; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = Tensor4DCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Iterations::kCount; /// Fragment object using Fragment = Array<Element, ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; // // Parameters struct // struct Params { // // Data members // LongIndex stride_col; ///< stride in bytes between columns LongIndex stride_row; ///< stride in bytes between rows // // Methods // CUTLASS_HOST_DEVICE Status initialize(typename Layout::Stride stride_) { stride_col = stride_[1]; stride_row = stride_[2]; return Status::kSuccess; } CUTLASS_HOST_DEVICE Params() { initialize(cutlass::make_Coord(0, 0, 0)); } CUTLASS_HOST_DEVICE Params(Layout const &layout) { initialize(layout.stride()); } CUTLASS_HOST_DEVICE Params(Layout const &layout, // Not needed. Added to be compatible with strided conv epilogue. conv::Conv2dProblemSize const &problem_size): Params(layout) { } }; /// Mask object struct Mask { static int const kCount = (ThreadMap::Iterations::kRow < 8) ? 8 : ThreadMap::Iterations::kRow; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
Params params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in columns Index extent_col_; /// Extent of the matrix tile in rows Index extent_row_; /// Extent of the matrix tile in pq Index extent_pq_; /// A thread's starting row position (assuming steady-state predicates have /// been computed) Index thread_start_row_; /// A thread's starting column position (assuming steady-state predicates have /// been computed) Index thread_start_col_; /// Internal iteration counter LongIndex iteration_row_; LongIndex iteration_col_; uint32_t pq_mul_; uint32_t pq_shr_; private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE InterleavedConvPredicatedTileIterator( Params const & params, Element *pointer, TensorCoord extent, int thread_idx, MatrixCoord threadblock_offset ): params_(params) { MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; extent_col_ = extent.c(); extent_pq_ = extent.h() * extent.w(); extent_row_ = extent.n() * extent_pq_; find_divisor(pq_mul_, pq_shr_, extent_pq_); thread_start_row_ = thread_offset.row(); thread_start_col_ = thread_offset.column(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int r = 0; r < ThreadMap::Iterations::kRow; ++r) { mask_.predicates[r] = ((thread_offset.row() + ThreadMap::Delta::kRow * r) < extent_row_); } // Initialize pointer byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + ((thread_start_col_ / InterleavedN) * params_.stride_col + (thread_start_col_ % InterleavedN)) * sizeof_bits<Element>::value / 8; // Initialize internal state counter iteration_row_ = iteration_col_ = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { int col_offset = iteration_col_ * ThreadMap::Delta::kColumn; bool col_guard = ((thread_start_col_ + col_offset) < extent_col_); bool guard = col_guard && mask_.predicates[iteration_row_]; int n, pq_rem; fast_divmod(n, pq_rem, thread_start_row_ + iteration_row_ * ThreadMap::Delta::kRow, extent_pq_, pq_mul_, pq_shr_); uint8_t *byte_pointer = byte_pointer_ + (n * params_.stride_row + pq_rem * InterleavedN) * sizeof_bits<Element>::value / 8; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); AccessType const *memory_pointer = reinterpret_cast<AccessType const *>(byte_pointer); cutlass::arch::global_load< AccessType, sizeof(AccessType) >( *frag_ptr, (void *)memory_pointer, guard); } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { int col_offset = iteration_col_ * ThreadMap::Delta::kColumn; bool col_guard = ((thread_start_col_ + col_offset) < extent_col_); bool guard = col_guard && mask_.predicates[iteration_row_]; int n, pq_rem; fast_divmod(n, pq_rem, thread_start_row_ + iteration_row_ * ThreadMap::Delta::kRow, extent_pq_, pq_mul_, pq_shr_); uint8_t *byte_pointer = byte_pointer_ + (n * params_.stride_row + pq_rem * InterleavedN) * sizeof_bits<Element>::value / 8; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer); cutlass::arch::global_store<AccessType, sizeof(AccessType)>( *frag_ptr, (void *)memory_pointer, guard); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int 
iteration) { iteration_row_ = iteration % ThreadMap::Iterations::kRow; iteration_col_ = iteration / ThreadMap::Iterations::kRow; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE InterleavedConvPredicatedTileIterator &operator++() { ++iteration_row_; if (iteration_row_ == ThreadMap::Iterations::kRow) { iteration_row_ = 0; ++iteration_col_; byte_pointer_ += params_.stride_col; if (iteration_col_ == ThreadMap::Iterations::kColumn) { iteration_col_ = 0; } } return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Gets the mask CUTLASS_DEVICE void get_mask(Mask &mask) { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h", "repo_id": "cutlass", "token_count": 16487 }
27
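The iterators defined in the header above all follow the same load/store-plus-increment protocol, which the epilogue drives once per output tile. The device-side sketch below is illustrative only and is not part of the file: store_tile, Iterator, and fragments are hypothetical names, and it assumes the iterator exposes a kIterations count, as the interleaved conv specialization above does.

#include "cutlass/cutlass.h"

// Minimal sketch: write one fragment per iteration through a predicated tile
// iterator. Out-of-bounds accesses are masked inside store() by the iterator's
// predicates, so no explicit bounds checks are needed here.
template <typename Iterator>
CUTLASS_DEVICE void store_tile(Iterator &iterator,
                               typename Iterator::Fragment const *fragments) {
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < Iterator::kIterations; ++i) {
    iterator.store(fragments[i]);  // guarded global store for this access
    ++iterator;                    // advance the byte pointer to the next position
  }
}

The load path is symmetric: call load() instead of store() and consume the fragment before incrementing.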
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile that participate in one warp-level store operation. Typically, the accumulator tile is the largest single block of register-backed storage within the kernel. Storing it to memory is best accomplished by partitioning it into smaller tiles and storing these sequentially. Round trips through shared memory during the Epilogue phase require partitioning, as shared memory capacity is typically insufficient for a threadblock's total accumulator size. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/warp/volta_tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape) typename ElementC, ///< Accumulator layout typename Layout ///< target shared memory layout > class FragmentIteratorVoltaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > class FragmentIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> { public: using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = half_t; using Layout = layout::RowMajor; /// Policy operator using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, ElementC, Layout>; /// Array type for aligned memory accesses using AccessType = typename Policy::AccessType; /// This is the fragment size produced by one access of the iterator. using Fragment = typename Policy::Fragment; /// This is the complete warp-level accumulator tile. using AccumulatorTile = typename Policy::AccumulatorTile; using OutputAccumulatorTile = AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; private: private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); static int const kAccessesPerMma = Policy::kElementsPerMma / Policy::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { int tile_access_idx = (tile_n * Policy::TileIterations::kRow + (index_ & 2) / 2) * Policy::MmaIterations::kCount * kAccessesPerMma; CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn * kAccessesPerMma; ++mma_n) { int mma_access_idx = ((mma_n & 1) * 2 + (index_ & 1)) * kAccessesPerMma + (mma_n & 2) / 2; frag_ptr[tile_n * Policy::MmaIterations::kColumn * kAccessesPerMma + mma_n] = accumulators_[tile_access_idx + mma_access_idx]; } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > class FragmentIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> 
{ public: using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = float; using Layout = layout::RowMajor; /// Policy operator using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, ElementC, Layout>; /// Array type for aligned memory accesses using AccessType = typename Policy::AccessType; /// This is the fragment size produced by one access of the iterator. using Fragment = typename Policy::Fragment; /// This is the complete warp-level accumulator tile. using AccumulatorTile = typename Policy::AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; private: private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); int const kRegsPerMmaRow = 2; CUTLASS_PRAGMA_UNROLL for (int reg_row = 0; reg_row < Policy::kRowsPerMmaTile; ++reg_row) { CUTLASS_PRAGMA_UNROLL for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn * 2; ++mma_n) { int mma_idx = (index_ & 1) + (index_ & 2) * Policy::MmaIterations::kCount / 2 + (tile_n * Policy::TileIterations::kRow) * Policy::MmaIterations::kCount + (mma_n & 1) * 2; int reg_offset = reg_row * kRegsPerMmaRow + (mma_n & 2) * 2; int reg_idx = mma_idx * Policy::kElementsPerMma + reg_offset; *frag_ptr = accumulators_[reg_idx / Policy::kElementsPerAccess]; ++frag_ptr; } } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h", "repo_id": "cutlass", "token_count": 2875 }
28
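A consumer of FragmentIteratorVoltaTensorOp constructs it over a warp's accumulator tile and then alternates load() with operator++ for kIterations steps. The sketch below is a hypothetical illustration, not code from the repository; visit_accumulators is an invented name, and the destination of each fragment (normally a warp tile iterator writing to shared memory) is elided.

#include "cutlass/cutlass.h"

// Minimal sketch, assuming FragmentIterator is an instantiation of
// FragmentIteratorVoltaTensorOp (or any iterator with the same interface).
template <typename FragmentIterator>
CUTLASS_DEVICE void visit_accumulators(
    typename FragmentIterator::AccumulatorTile const &accum) {

  FragmentIterator frag_iterator(accum);

  CUTLASS_PRAGMA_UNROLL
  for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
    typename FragmentIterator::Fragment frag;
    frag_iterator.load(frag);  // gather this step's registers from the accumulator tile
    ++frag_iterator;
    // ... hand frag to a warp-level tile iterator that stores it to shared memory
  }
}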
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/gemm/collective/builders/sm90_common.inl" // SM90 Collective Builders should be used only starting CUDA 12.0 #if (__CUDACC_VER_MAJOR__ >= 12) #define CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED #endif ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { // Returns the maximum number of smem tiles that can be used with a given smem capacity, or overrides with manual count. template<int CapacityBytes, class ElementA, class ElementB, class TileShapeMNK, int stages> constexpr int compute_stage_count_or_override(StageCount<stages> stage_count) { return stages; } // Returns the maximum number of smem tiles that can be used with a given smem capacity, or overrides with manual count. template<int CapacityBytes, class ElementA, class ElementB, class TileShapeMNK, int stages> constexpr int compute_stage_count_or_override(cute::Int<stages> stage_count) { return stages; } // Returns the maximum number of smem tiles that can be used with a given smem capacity, or overrides with manual count. 
template<int CapacityBytes, class ElementA, class ElementB, class TileShapeMNK, int carveout_bytes> constexpr int compute_stage_count_or_override(StageCountAutoCarveout<carveout_bytes> stage_count) { constexpr auto mainloop_pipeline_bytes = sizeof(typename cutlass::PipelineTmaAsync<1>::SharedStorage); constexpr auto a_bits = cute::sizeof_bits_v<ElementA>; constexpr auto b_bits = cute::sizeof_bits_v<ElementB>; constexpr int stage_bytes = cutlass::bits_to_bytes(a_bits * size<0>(TileShapeMNK{}) * size<2>(TileShapeMNK{})) + cutlass::bits_to_bytes(b_bits * size<1>(TileShapeMNK{}) * size<2>(TileShapeMNK{})) + static_cast<int>(mainloop_pipeline_bytes); return (CapacityBytes - carveout_bytes) / stage_bytes; } // Returns the maximum number of smem tiles that can be used with a given smem capacity (with an optional scale matrix), or overrides with manual count. template<int CapacityBytes, class ElementA, class ElementB, class ElementScale, class ElementZero, class TileShapeMNK, int stages> constexpr int compute_stage_count_or_override_single_affine_transformed_input(StageCount<stages> stage_count) { return stages; } template <class Element> constexpr int get_bits_for_possibly_void_element() { if constexpr (cute::is_same_v<Element, void>) { return 0; } else { return sizeof_bits<Element>::value; } } // Returns the maximum number of smem tiles that can be used with a given smem capacity (with an optional scale matrix), or overrides with manual count. template<int CapacityBytes, class ElementA, class ElementB, class ElementScale, class ElementZero, class TileShapeMNK, int carveout_bytes> constexpr int compute_stage_count_or_override_single_affine_transformed_input(StageCountAutoCarveout<carveout_bytes> stage_count) { // 32 bytes to account for barriers etc. constexpr auto mainloop_pipeline_bytes = sizeof(typename cutlass::PipelineTmaAsync<1>::SharedStorage); constexpr int scale_zero_k_tile = 1; constexpr auto a_bits = cute::sizeof_bits_v<ElementA>; constexpr auto b_bits = cute::sizeof_bits_v<ElementB>; constexpr auto s_bits = get_bits_for_possibly_void_element<ElementScale>(); constexpr auto z_bits = get_bits_for_possibly_void_element<ElementZero>(); constexpr auto scale_bytes = cutlass::bits_to_bytes(s_bits * size<0>(TileShapeMNK{}) * scale_zero_k_tile); constexpr auto zero_bytes = cutlass::bits_to_bytes(z_bits * size<0>(TileShapeMNK{}) * scale_zero_k_tile); static_assert(scale_bytes % 128 == 0, "Scale bytes must be a multiple of 128"); static_assert(zero_bytes % 128 == 0, "Zero bytes must be a multiple of 128"); // When scales are void, s_bits will be 0 so no smem will be allocated for scales. 
constexpr int stage_bytes = cutlass::bits_to_bytes(a_bits * size<0>(TileShapeMNK{}) * size<2>(TileShapeMNK{})) + cutlass::bits_to_bytes(b_bits * size<1>(TileShapeMNK{}) * size<2>(TileShapeMNK{})) + static_cast<int>(scale_bytes + zero_bytes + mainloop_pipeline_bytes); return (CapacityBytes - carveout_bytes) / stage_bytes; } template <class ElementA, class LayoutA, class ElementB, class LayoutB> constexpr bool is_swapAB(){ constexpr bool IsInputSizeTwoBytes = is_input_size_two_bytes<ElementA, ElementB>(); constexpr bool IsLayoutAkBmn = cutlass::gemm::detail::is_k_major_A<LayoutA>() && cutlass::gemm::detail::is_mn_major_B<LayoutB>(); constexpr bool SwapAB = !IsInputSizeTwoBytes && IsLayoutAkBmn; return SwapAB; } template <class ElementA, class LayoutA, class ElementB, class LayoutB, class KernelScheduleType> constexpr bool is_warpspecialized_transpose_B(){ constexpr bool IsInputSizeTwoBytes = is_input_size_two_bytes<ElementA, ElementB>(); constexpr bool IsLayoutAmnBmn = cutlass::gemm::detail::is_mn_major_A<LayoutA>() && cutlass::gemm::detail::is_mn_major_B<LayoutB>(); constexpr bool IsWarpSpecialized = cute::is_base_of_v<KernelTmaWarpSpecialized, KernelScheduleType> || cute::is_base_of_v<KernelTmaWarpSpecializedPingpong, KernelScheduleType> || cute::is_base_of_v<KernelTmaWarpSpecializedCooperative, KernelScheduleType> || cute::is_base_of_v<KernelCpAsyncWarpSpecialized, KernelScheduleType> || cute::is_base_of_v<KernelCpAsyncWarpSpecializedPingpong, KernelScheduleType> || cute::is_base_of_v<KernelCpAsyncWarpSpecializedCooperative, KernelScheduleType>; constexpr bool IsWarpSpecializedTransposeB = !IsInputSizeTwoBytes && IsLayoutAmnBmn && IsWarpSpecialized; return IsWarpSpecializedTransposeB; } } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA_TMA_WS_SS template < class ElementA, class GmemLayoutATag, int AlignmentA, class ElementB, class GmemLayoutBTag, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t< (cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecialized> || cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedPingpong> || cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedCooperative> || cute::is_same_v<KernelScheduleType, KernelPtrArrayTmaWarpSpecializedCooperative>) && not detail::is_use_rmem_A<ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag>()> > { static_assert(is_static<TileShape_MNK>::value); static_assert(is_static<ClusterShape_MNK>::value); #ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n"); #endif static_assert(detail::is_aligned<ElementA, AlignmentA, ElementB, AlignmentB, detail::tma_alignment_bytes>(), "Should meet TMA alignment requirement\n"); static constexpr bool IsArrayOfPointersGemm = (cute::is_same_v<KernelScheduleType, KernelPtrArrayTmaWarpSpecializedCooperative>); static constexpr bool IsFP8Input = detail::is_input_fp8<ElementA, ElementB>(); static_assert(!IsFP8Input || (IsFP8Input && !IsArrayOfPointersGemm), "Kernel[Array/Group]TmaWarpSpecializedCooperative is only compatible with FP8 FastAccum version right now\n"); // For fp32 types, 
map to tf32 MMA value type using ElementAMma = cute::conditional_t<cute::is_same_v<ElementA, float>, tfloat32_t, ElementA>; using ElementBMma = cute::conditional_t<cute::is_same_v<ElementB, float>, tfloat32_t, ElementB>; static constexpr cute::GMMA::Major GmmaMajorA = detail::gmma_ss_tag_to_major_A<ElementAMma, GmemLayoutATag>(); static constexpr cute::GMMA::Major GmmaMajorB = detail::gmma_ss_tag_to_major_B<ElementBMma, GmemLayoutBTag>(); using AtomLayoutMNK = cute::conditional_t< cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedCooperative> || IsArrayOfPointersGemm, Layout<Shape<_2,_1,_1>>, Layout<Shape<_1,_1,_1>>>; using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::ss_op_selector< ElementAMma, ElementBMma, ElementAccumulator, TileShape_MNK, GmmaMajorA, GmmaMajorB>(), AtomLayoutMNK{})); using GmemTiledCopyA = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<1>(ClusterShape_MNK{}))); using GmemTiledCopyB = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<0>(ClusterShape_MNK{}))); using SmemLayoutAtomA = decltype(detail::ss_smem_selector< GmmaMajorA, ElementAMma, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); using SmemLayoutAtomB = decltype(detail::ss_smem_selector< GmmaMajorB, ElementBMma, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); static constexpr int PipelineStages = detail::compute_stage_count_or_override<detail::sm90_smem_capacity_bytes, ElementAMma, ElementBMma, TileShape_MNK>(StageCountType{}); using DispatchPolicy = cute::conditional_t<IsArrayOfPointersGemm, MainloopSm90ArrayTmaGmmaWarpSpecialized<PipelineStages, ClusterShape_MNK, KernelScheduleType>, /* For FP8 use a separate mainloop compared to other datatypes */ cute::conditional_t<IsFP8Input, MainloopSm90TmaGmmaWarpSpecializedFP8<PipelineStages, ClusterShape_MNK, KernelScheduleType>, MainloopSm90TmaGmmaWarpSpecialized<PipelineStages, ClusterShape_MNK, KernelScheduleType>>>; using SmemCopyAtomA = void; using SmemCopyAtomB = void; using CollectiveOp = CollectiveMma< DispatchPolicy, TileShape_MNK, ElementA, TagToStrideA_t<GmemLayoutATag>, ElementB, TagToStrideB_t<GmemLayoutBTag>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA_TMA_WS_RS template < class ElementA, class GmemLayoutATag, int AlignmentA, class ElementB, class GmemLayoutBTag, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t< (cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecialized> || cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedPingpong> || cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedCooperative>) && detail::is_use_rmem_A<ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag>()> > { static_assert(is_static<TileShape_MNK>::value); static_assert(is_static<ClusterShape_MNK>::value); static_assert(detail::is_aligned<ElementA, AlignmentA, ElementB, AlignmentB, detail::tma_alignment_bytes>(), "Should meet TMA alignment requirement\n"); #ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED 
static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n"); #endif static constexpr cute::GMMA::Major GmmaMajorA = detail::gmma_rs_tag_to_major_A<GmemLayoutATag>(); static constexpr cute::GMMA::Major GmmaMajorB = detail::gmma_rs_tag_to_major_B<GmemLayoutBTag>(); static constexpr bool SwapAB = detail::is_swapAB<ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag>(); static constexpr bool IsWarpSpecializedTransposeB = detail::is_warpspecialized_transpose_B< ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag, KernelScheduleType>(); // For fp32 types, map to tf32 MMA value type using ElementAMma = cute::conditional_t<cute::is_same_v<ElementA, float>, tfloat32_t, ElementA>; using ElementBMma = cute::conditional_t<cute::is_same_v<ElementB, float>, tfloat32_t, ElementB>; using AtomLayoutMNK = cute::conditional_t<cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedCooperative>, Layout<Shape<_2,_1,_1>>, Layout<Shape<_1,_1,_1>>>; using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::rs_op_selector< ElementAMma, ElementBMma, ElementAccumulator, TileShape_MNK, GMMA::Major::K, GMMA::Major::K>(), AtomLayoutMNK{})); using GmemTiledCopyA = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<1>(ClusterShape_MNK{}))); using GmemTiledCopyB = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<0>(ClusterShape_MNK{}))); using SmemLayoutAtomA = decltype(detail::rs_smem_selector<GmmaMajorA, ElementAMma, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{})), IsWarpSpecializedTransposeB>()); using SmemLayoutAtomB = decltype(detail::rs_smem_selector<GmmaMajorB, ElementBMma, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{})), IsWarpSpecializedTransposeB>()); static constexpr int PipelineStages = detail::compute_stage_count_or_override<detail::sm90_smem_capacity_bytes, ElementAMma, ElementBMma, TileShape_MNK>(StageCountType{}); using DispatchPolicy = MainloopSm90TmaGmmaRmemAWarpSpecialized< PipelineStages, ClusterShape_MNK, KernelScheduleType>; using SmemCopyAtomA = cute::conditional_t<SwapAB, void, Copy_Atom<cute::DefaultCopy, ElementA>>; using SmemCopyAtomB = cute::conditional_t<SwapAB, Copy_Atom<cute::DefaultCopy, ElementB>, void>; using CollectiveOp = CollectiveMma< DispatchPolicy, TileShape_MNK, ElementA, TagToStrideA_t<GmemLayoutATag>, ElementB, TagToStrideB_t<GmemLayoutBTag>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA_TMA_WS_RS Mixed Scaled GEMM template < class ElementPairA_, class GmemLayoutATag_, int AlignmentA, class ElementPairB_, class GmemLayoutBTag_, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementPairA_, GmemLayoutATag_, AlignmentA, ElementPairB_, GmemLayoutBTag_, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t< (cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedMixedInput> || cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedPingpongMixedInput> || cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedCooperativeMixedInput>)> > { private: using ScaleA = detail::deduce_mixed_width_dtype_t<1, ElementPairA_>; using ScaleB = 
detail::deduce_mixed_width_dtype_t<1, ElementPairB_>; using ZeroA = detail::deduce_mixed_width_dtype_t<2, ElementPairA_>; using ZeroB = detail::deduce_mixed_width_dtype_t<2, ElementPairB_>; static constexpr bool NeitherIsTuple = !cute::is_tuple<ElementPairA_>::value && !cute::is_tuple<ElementPairB_>::value; public: using ElementA = detail::deduce_mixed_width_dtype_t<0, ElementPairA_>; using ElementB = detail::deduce_mixed_width_dtype_t<0, ElementPairB_>; static_assert(cute::is_tuple<ElementPairA_>::value ^ cute::is_tuple<ElementPairB_>::value || (NeitherIsTuple && (sizeof_bits<ElementA>::value != sizeof_bits<ElementB>::value)), "Either A OR B must be a tuple or the widths of A and B must be different."); static constexpr bool IsANarrow = sizeof_bits<ElementA>::value < sizeof_bits<ElementB>::value; using GmemLayoutATag = GmemLayoutATag_; using GmemLayoutBTag = GmemLayoutBTag_; using ElementPairA = cute::conditional_t<IsANarrow && NeitherIsTuple, cute::tuple<ElementA>, ElementPairA_>; using ElementPairB = cute::conditional_t<!IsANarrow && NeitherIsTuple, cute::tuple<ElementB>, ElementPairB_>; static constexpr bool IsATransformed = cute::is_tuple<ElementPairA>::value; using ElementScale = cute::conditional_t<IsATransformed, ScaleA, ScaleB>; using ElementZero = cute::conditional_t<IsATransformed, ZeroA, ZeroB>; static_assert(is_static<TileShape_MNK>::value); static_assert(is_static<ClusterShape_MNK>::value); static_assert(detail::is_aligned<ElementA, AlignmentA, ElementB, AlignmentB, detail::tma_alignment_bytes>(), "Should meet TMA alignment requirement\n"); #ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n"); #endif static constexpr cute::GMMA::Major GmmaMajorA = detail::gmma_rs_tag_to_major_A<GmemLayoutATag>(); static constexpr cute::GMMA::Major GmmaMajorB = detail::gmma_rs_tag_to_major_B<GmemLayoutBTag>(); static constexpr bool IsWarpSpecializedTransposeB = detail::is_warpspecialized_transpose_B< ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag, KernelScheduleType>(); static_assert(!IsWarpSpecializedTransposeB, "Mixed input GEMM does not support WS transpose B."); // If A is scaled, then we don't need to swap. Otherwise, we must ensure B goes to RF and we must swap the operands. static constexpr bool SwapAB = !IsATransformed; // When we relax the above assertion, we must handle setting the tile mma GmmaMajorB correctly. static constexpr cute::GMMA::Major TiledMmaGmmaMajorB = SwapAB ? 
GmmaMajorA : GmmaMajorB; using ElementMma = cute::conditional_t<IsATransformed, ElementB, ElementA>; using AtomLayoutMNK = cute::conditional_t<cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedCooperativeMixedInput>, Layout<Shape<_2,_1,_1>>, Layout<Shape<_1,_1,_1>>>; using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::rs_op_selector< ElementMma, ElementMma, ElementAccumulator, TileShape_MNK, GMMA::Major::K, TiledMmaGmmaMajorB>(), AtomLayoutMNK{})); using GmemTiledCopyA = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<1>(ClusterShape_MNK{}))); using GmemTiledCopyB = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<0>(ClusterShape_MNK{}))); using SmemLayoutAtomA = decltype(detail::rs_smem_selector<GmmaMajorA, ElementA, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{})), IsWarpSpecializedTransposeB>()); using SmemLayoutAtomB = decltype(detail::rs_smem_selector<GmmaMajorB, ElementB, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{})), IsWarpSpecializedTransposeB>()); using RealElementA = cute::conditional_t<SwapAB, ElementB, ElementA>; using RealElementB = cute::conditional_t<SwapAB, ElementA, ElementB>; static constexpr int PipelineStages = detail::compute_stage_count_or_override_single_affine_transformed_input<detail::sm90_smem_capacity_bytes, RealElementA, RealElementB, ElementScale, ElementZero, TileShape_MNK>(StageCountType{}); using SmemCopyAtomA = cute::conditional_t<SwapAB, void, Copy_Atom<cute::DefaultCopy, ElementA>>; using SmemCopyAtomB = cute::conditional_t<SwapAB, Copy_Atom<cute::DefaultCopy, ElementB>, void>; using DispatchPolicy = MainloopSm90TmaGmmaRmemAWarpSpecializedMixedInput<PipelineStages, ClusterShape_MNK, KernelScheduleType>; // We pack the scale data with the operand that will be optionally scaled and converted before MMA. 
using StrideA = TagToStrideA_t<GmemLayoutATag>; using StrideB = TagToStrideB_t<GmemLayoutBTag>; using CollectiveOp = CollectiveMma< DispatchPolicy, TileShape_MNK, ElementPairA, StrideA, ElementPairB, StrideB, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA_TMA_WS_FP8_FAST_ACCUM_SS template < class ElementA, class GmemLayoutATag, int AlignmentA, class ElementB, class GmemLayoutBTag, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t< cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedFP8FastAccum> || cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedPingpongFP8FastAccum> || cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedCooperativeFP8FastAccum> || cute::is_same_v<KernelScheduleType, KernelPtrArrayTmaWarpSpecializedCooperativeFP8FastAccum>> > { static_assert(is_static<TileShape_MNK>::value); static_assert(is_static<ClusterShape_MNK>::value); static_assert(detail::is_aligned<ElementA, AlignmentA, ElementB, AlignmentB, detail::tma_alignment_bytes>(), "Not meet TMA alignment requirement yet\n"); static_assert(detail::is_input_fp8<ElementA, ElementB>(), "Only FP8 datatypes are compatible with these kernel schedules\n"); // Dispatch TN fp8 kernels only to TMA warp specialized FP8 builder static_assert(!detail::is_use_rmem_A<ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag>(), "Not supported for fp8 non-TN warp specialized kernels yet\n"); #ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n"); #endif static constexpr cute::GMMA::Major GmmaMajorA = detail::gmma_ss_tag_to_major_A<ElementA, GmemLayoutATag>(); static constexpr cute::GMMA::Major GmmaMajorB = detail::gmma_ss_tag_to_major_B<ElementB, GmemLayoutBTag>(); static constexpr bool IsArrayOfPointersGemm = (cute::is_same_v<KernelScheduleType, KernelPtrArrayTmaWarpSpecializedCooperativeFP8FastAccum>); using AtomLayoutMNK = cute::conditional_t<cute::is_same_v<KernelScheduleType, KernelTmaWarpSpecializedCooperativeFP8FastAccum> || IsArrayOfPointersGemm, Layout<Shape<_2,_1,_1>>, Layout<Shape<_1,_1,_1>>>; using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::ss_op_selector< ElementA, ElementB, ElementAccumulator, TileShape_MNK, GmmaMajorA, GmmaMajorB>(), AtomLayoutMNK{})); using GmemTiledCopyA = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<1>(ClusterShape_MNK{}))); using GmemTiledCopyB = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<0>(ClusterShape_MNK{}))); using SmemLayoutAtomA = decltype(detail::ss_smem_selector< GmmaMajorA, ElementA, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); using SmemLayoutAtomB = decltype(detail::ss_smem_selector< GmmaMajorB, ElementB, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); static constexpr int PipelineStages = detail::compute_stage_count_or_override<detail::sm90_smem_capacity_bytes, ElementA, ElementB, TileShape_MNK>(StageCountType{}); using DispatchPolicy = 
cute::conditional_t<IsArrayOfPointersGemm, MainloopSm90ArrayTmaGmmaWarpSpecialized<PipelineStages, ClusterShape_MNK, KernelScheduleType>, MainloopSm90TmaGmmaWarpSpecialized<PipelineStages, ClusterShape_MNK, KernelScheduleType>>; using SmemCopyAtomA = void; using SmemCopyAtomB = void; using CollectiveOp = CollectiveMma< DispatchPolicy, TileShape_MNK, ElementA, TagToStrideA_t<GmemLayoutATag>, ElementB, TagToStrideB_t<GmemLayoutBTag>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA_TMA_SS template < class ElementA, class GmemLayoutATag, int AlignmentA, class ElementB, class GmemLayoutBTag, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t<cute::is_same_v<KernelScheduleType, KernelTma> && not detail::is_use_rmem_A<ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag>()> > { static_assert(is_static<TileShape_MNK>::value); static_assert(is_static<ClusterShape_MNK>::value); static_assert(detail::is_aligned<ElementA, AlignmentA, ElementB, AlignmentB, detail::tma_alignment_bytes>(), "Should meet TMA alignment requirement\n"); #ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n"); #endif // For fp32 types, map to tf32 MMA value type using ElementAMma = cute::conditional_t<cute::is_same_v<ElementA, float>, tfloat32_t, ElementA>; using ElementBMma = cute::conditional_t<cute::is_same_v<ElementB, float>, tfloat32_t, ElementB>; static constexpr cute::GMMA::Major GmmaMajorA = detail::gmma_ss_tag_to_major_A<ElementAMma, GmemLayoutATag>(); static constexpr cute::GMMA::Major GmmaMajorB = detail::gmma_ss_tag_to_major_B<ElementBMma, GmemLayoutBTag>(); using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::ss_op_selector< ElementAMma, ElementBMma, ElementAccumulator, TileShape_MNK, GmmaMajorA, GmmaMajorB>())); using GmemTiledCopyA = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<1>(ClusterShape_MNK{}))); using GmemTiledCopyB = decltype(detail::sm90_cluster_shape_to_tma_atom(shape<0>(ClusterShape_MNK{}))); using SmemLayoutAtomA = decltype(detail::ss_smem_selector< GmmaMajorA, ElementAMma, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); using SmemLayoutAtomB = decltype(detail::ss_smem_selector< GmmaMajorB, ElementBMma, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); static constexpr int PipelineStages = detail::compute_stage_count_or_override<detail::sm90_smem_capacity_bytes, ElementAMma, ElementBMma, TileShape_MNK>(StageCountType{}); using DispatchPolicy = MainloopSm90TmaGmma<PipelineStages, ClusterShape_MNK>; using SmemCopyAtomA = void; using SmemCopyAtomB = void; using CollectiveOp = CollectiveMma< DispatchPolicy, TileShape_MNK, ElementA, TagToStrideA_t<GmemLayoutATag>, ElementB, TagToStrideB_t<GmemLayoutBTag>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity >; }; 
///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA_CpAsync template < class ElementA, class GmemLayoutATag, int AlignmentA, class ElementB, class GmemLayoutBTag, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct [[deprecated("Use one of KernelCpAsyncWarpSpecialized schedules instead")]] CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t< cute::is_same_v<KernelScheduleType, KernelMultistage>> > { // Map to warp-specialized kernels for better performance using CollectiveOp = typename CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelCpAsyncWarpSpecialized >::CollectiveOp; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA_CpAsync_WS_SS template < class ElementA, class GmemLayoutATag, int AlignmentA, class ElementB, class GmemLayoutBTag, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t< (cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecialized> || cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecializedCooperative> || cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecializedPingpong>) && not detail::is_use_rmem_A<ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag>() > > { static_assert(is_static<TileShape_MNK>::value); static_assert(is_static<ClusterShape_MNK>::value); #ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n"); #endif // For fp32 types, map to tf32 MMA value type using ElementAMma = cute::conditional_t<cute::is_same_v<ElementA, float>, tfloat32_t, ElementA>; using ElementBMma = cute::conditional_t<cute::is_same_v<ElementB, float>, tfloat32_t, ElementB>; static_assert(detail::is_aligned<ElementA, AlignmentA, ElementB, AlignmentB, detail::cp_async_min_alignment_bytes>(), "Minimum alignment required for cp.async is 4B."); static constexpr cute::GMMA::Major GmmaMajorA = detail::gmma_ss_tag_to_major_A<ElementA, GmemLayoutATag>(); static constexpr cute::GMMA::Major GmmaMajorB = detail::gmma_ss_tag_to_major_B<ElementB, GmemLayoutBTag>(); using AtomLayoutMNK = cute::conditional_t<cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecializedCooperative>, Layout<Shape<cute::Int<(size<0>(TileShape_MNK{}) < 128) ? 1 : 2>,_1,_1>>, Layout<Shape<_1,_1,_1>>>; using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::ss_op_selector< ElementAMma, ElementBMma, ElementAccumulator, TileShape_MNK, GmmaMajorA, GmmaMajorB>(), AtomLayoutMNK{})); static constexpr int NumLoadWarpGroups = cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecialized> ? 
2 : 1; using GmemTiledCopyA = decltype(detail::make_cp_async_gmem_tiled_copy< NumThreadsPerWarpGroup * NumLoadWarpGroups, ElementA, AlignmentA, TagToStrideA_t<GmemLayoutATag>, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); using GmemTiledCopyB = decltype(detail::make_cp_async_gmem_tiled_copy< NumThreadsPerWarpGroup * NumLoadWarpGroups, ElementB, AlignmentB, TagToStrideB_t<GmemLayoutBTag>, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); using SmemLayoutAtomA = decltype(detail::ss_smem_selector< GmmaMajorA, ElementAMma, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); using SmemLayoutAtomB = decltype(detail::ss_smem_selector< GmmaMajorB, ElementBMma, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); static constexpr int PipelineStages = detail::compute_stage_count_or_override< detail::sm90_smem_capacity_bytes, ElementAMma, ElementBMma, TileShape_MNK>(StageCountType{}); using DispatchPolicy = MainloopSm90CpAsyncGmmaWarpSpecialized< PipelineStages, ClusterShape_MNK, KernelScheduleType>; using CollectiveOp = CollectiveMma< DispatchPolicy, TileShape_MNK, ElementA, TagToStrideA_t<GmemLayoutATag>, ElementB, TagToStrideB_t<GmemLayoutBTag>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, void, cute::identity, GmemTiledCopyB, SmemLayoutAtomB, void, cute::identity >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA_CpAsync_WS_RS template < class ElementA, class GmemLayoutATag, int AlignmentA, class ElementB, class GmemLayoutBTag, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t< (cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecialized> || cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecializedCooperative> || cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecializedPingpong>) && detail::is_use_rmem_A<ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag>() > > { static_assert(is_static<TileShape_MNK>::value); static_assert(is_static<ClusterShape_MNK>::value); #ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n"); #endif // For fp32 types, map to tf32 MMA value type using ElementAMma = cute::conditional_t<cute::is_same_v<ElementA, float>, tfloat32_t, ElementA>; using ElementBMma = cute::conditional_t<cute::is_same_v<ElementB, float>, tfloat32_t, ElementB>; static_assert(detail::is_aligned<ElementA, AlignmentA, ElementB, AlignmentB, detail::cp_async_min_alignment_bytes>(), "Minimum alignment required for cp.async is 4B."); static constexpr cute::GMMA::Major GmmaMajorA = detail::gmma_rs_tag_to_major_A<GmemLayoutATag>(); static constexpr cute::GMMA::Major GmmaMajorB = detail::gmma_rs_tag_to_major_B<GmemLayoutBTag>(); static constexpr bool SwapAB = detail::is_swapAB<ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag>(); static constexpr bool IsWarpSpecializedTransposeB = detail::is_warpspecialized_transpose_B< ElementA, GmemLayoutATag, ElementB, GmemLayoutBTag, KernelScheduleType>(); using AtomLayoutMNK = 
cute::conditional_t<cute::is_same_v<KernelScheduleType, KernelCpAsyncWarpSpecializedCooperative>, Layout<Shape<cute::Int<(size<0>(TileShape_MNK{}) < 128) ? 1 : 2>,_1,_1>>, Layout<Shape<_1,_1,_1>>>; using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::rs_op_selector< ElementAMma, ElementBMma, ElementAccumulator, TileShape_MNK, GMMA::Major::K, GMMA::Major::K>(), AtomLayoutMNK{})); static constexpr int NumLoadWarpGroups = 1; using GmemTiledCopyA = decltype(detail::make_cp_async_gmem_tiled_copy< NumThreadsPerWarpGroup * NumLoadWarpGroups, ElementA, AlignmentA, TagToStrideA_t<GmemLayoutATag>, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); using GmemTiledCopyB = decltype(detail::make_cp_async_gmem_tiled_copy< NumThreadsPerWarpGroup * NumLoadWarpGroups, ElementB, AlignmentB, TagToStrideB_t<GmemLayoutBTag>, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{}))>()); using SmemLayoutAtomA = decltype(detail::rs_smem_selector<GmmaMajorA, ElementAMma, decltype(cute::get<0>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{})), IsWarpSpecializedTransposeB>()); using SmemLayoutAtomB = decltype(detail::rs_smem_selector<GmmaMajorB, ElementBMma, decltype(cute::get<1>(TileShape_MNK{})), decltype(cute::get<2>(TileShape_MNK{})), IsWarpSpecializedTransposeB>()); static constexpr int PipelineStages = detail::compute_stage_count_or_override< detail::sm90_smem_capacity_bytes, ElementAMma, ElementBMma, TileShape_MNK>(StageCountType{}); using DispatchPolicy = MainloopSm90CpAsyncGmmaRmemAWarpSpecialized< PipelineStages, ClusterShape_MNK, KernelScheduleType>; using SmemCopyAtomA = cute::conditional_t<SwapAB, void, Copy_Atom<cute::DefaultCopy, ElementA>>; using SmemCopyAtomB = cute::conditional_t<SwapAB, Copy_Atom<cute::DefaultCopy, ElementB>, void>; using CollectiveOp = CollectiveMma< DispatchPolicy, TileShape_MNK, ElementA, TagToStrideA_t<GmemLayoutATag>, ElementB, TagToStrideB_t<GmemLayoutBTag>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // GMMA auto kernel schedule template < class ElementA, class GmemLayoutATag, int AlignmentA, class ElementB, class GmemLayoutBTag, int AlignmentB, class ElementAccumulator, class TileShape_MNK, class ClusterShape_MNK, class StageCountType, class KernelScheduleType > struct CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelScheduleType, cute::enable_if_t<cute::is_same_v<KernelScheduleType, KernelScheduleAuto>> > { static_assert(is_static<TileShape_MNK>::value); static_assert(is_static<ClusterShape_MNK>::value); #ifndef CUTLASS_SM90_COLLECTIVE_BUILDER_SUPPORTED static_assert(cutlass::detail::dependent_false<ElementA>, "Unsupported Toolkit for SM90 Collective Builder\n"); #endif using ExtractedElementA = detail::deduce_mixed_width_dtype_t<0, ElementA>; using ExtractedElementB = detail::deduce_mixed_width_dtype_t<0, ElementB>; static constexpr bool IsTmaCompatible = detail::is_aligned< ExtractedElementA, AlignmentA, ExtractedElementB, AlignmentB, detail::tma_alignment_bytes>(); // Users opt into scales via the builder by passing a tuple of Elements for the input that will be scaled. We detect // scale support if ONLY one of the inputs have tuples to describe them. 
static constexpr bool OnlyOneIsTuple = cute::is_tuple<ElementA>::value ^ cute::is_tuple<ElementB>::value; static constexpr bool IsDifferentWidth = sizeof_bits<ExtractedElementA>::value != sizeof_bits<ExtractedElementB>::value; static constexpr bool IsMixedWidthInput = IsDifferentWidth || (IsDifferentWidth && OnlyOneIsTuple); #if ((__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 1))) // Persistent schedules perform best for CUDA Toolkits with version >= 12.1 // KernelTmaWarpSpecializedCooperative requires TileShape_M to be at least 128 using KernelTmaWarpSpecializedScheduleSameInput = cute::conditional_t<size<0>(TileShape_MNK{}) == Int<64>{}, KernelTmaWarpSpecializedPingpong, KernelTmaWarpSpecializedCooperative>; using KernelTmaWarpSpecializedScheduleMixedInput = cute::conditional_t<size<0>(TileShape_MNK{}) == Int<64>{}, KernelTmaWarpSpecializedPingpongMixedInput, KernelTmaWarpSpecializedCooperativeMixedInput>; using KernelTmaWarpSpecializedSchedule = cute::conditional_t<IsMixedWidthInput, KernelTmaWarpSpecializedScheduleMixedInput, KernelTmaWarpSpecializedScheduleSameInput>; #else using KernelTmaWarpSpecializedSchedule = cute::conditional_t<IsMixedWidthInput, KernelTmaWarpSpecializedMixedInput, KernelTmaWarpSpecialized>; #endif // Non-persistent schedule is a safer choice for CpAsync kernels due to register pressure using KernelCpAsyncWarpSpecializedSchedule = KernelCpAsyncWarpSpecialized; using KernelSchedule = cute::conditional_t<IsTmaCompatible, KernelTmaWarpSpecializedSchedule, KernelCpAsyncWarpSpecializedSchedule>; static_assert((cute::is_same_v<KernelSchedule, KernelTmaWarpSpecializedSchedule> && IsMixedWidthInput) || !IsMixedWidthInput, "Only TMA warp specialized kernels are supported for mixed width input."); using CollectiveOp = typename CollectiveBuilder< arch::Sm90, arch::OpClassTensorOp, ElementA, GmemLayoutATag, AlignmentA, ElementB, GmemLayoutBTag, AlignmentB, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, StageCountType, KernelSchedule >::CollectiveOp; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::collective /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/collective/builders/sm90_gmma_builder.inl/0
{ "file_path": "cutlass/include/cutlass/gemm/collective/builders/sm90_gmma_builder.inl", "repo_id": "cutlass", "token_count": 16380 }
29
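The KernelScheduleAuto specialization above is normally reached through the top-level CollectiveBuilder entry point. Below is a minimal, illustrative sketch of such an instantiation; the element types, alignments, tile shape, and cluster shape are assumptions chosen to satisfy the 16-byte TMA alignment check, not values taken from this file.

// Sketch only: let the builder pick the stage count and kernel schedule.
// With 16B-aligned half_t operands and TileShape_M == 128, the logic above
// resolves to a TMA warp-specialized cooperative mainloop on CUDA >= 12.1.
#include "cutlass/gemm/collective/collective_builder.hpp"

using TileShape_MNK    = cute::Shape<cute::_128, cute::_128, cute::_64>;  // assumed tile shape
using ClusterShape_MNK = cute::Shape<cute::_2, cute::_1, cute::_1>;       // assumed cluster shape

using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
    cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
    cutlass::half_t, cutlass::layout::RowMajor,    8,   // A: element, layout tag, alignment (elements)
    cutlass::half_t, cutlass::layout::ColumnMajor, 8,   // B: element, layout tag, alignment (elements)
    float,                                               // accumulator
    TileShape_MNK, ClusterShape_MNK,
    cutlass::gemm::collective::StageCountAuto,           // builder computes the stage count
    cutlass::gemm::collective::KernelScheduleAuto        // dispatches to the partial specialization above
  >::CollectiveOp;

The resulting CollectiveOp is then combined with a collective epilogue and a tile scheduler inside a kernel-level GEMM type.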
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a Block-Ell sparse gemm kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/ell_gemm.h" #include "cutlass/gemm/kernel/default_ell_gemm.h" #include "cutlass/gemm/device/default_gemm_configuration.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// /*! Blocked-Ell sparse gemm device-level operator. This is an interface to efficient CUTLASS Blocked-Ell kernels that may be invoked from host code. The contributions of this class are: 1. At compile time, it maps data types and high-level structural parameters onto specific CUTLASS components. 2. At runtime, it maps logical arguments to Blocked-Ell problems to kernel parameters. 3. At runtime, it launches kernels on the device. Example of a CUTLASS EllGemm operator is as follows: // // Instantiate the CUTLASS EllGemm operator. 
// cutlass::gemm::device::EllGemm< cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 128, 32>, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination< cutlass::half_t, 128 / cutlass::sizeof_bits<cutlass::half_t>::value, float, float>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, 4, // Stages 128 / cutlass::sizeof_bits<cutlass::half_t>::value, // Alignment A 128 / cutlass::sizeof_bits<cutlass::half_t>::value // Alignment B > ellgemm_op; // // Launch the EllGemm operation on the device // Description of parameters and tensors used to represent the Blocked-Ellpack (ELL) format: a_rows - Rows in the sparse matrix. a_cols - Colums in the sparse matrix. BlockedEllA - Packed matrix (ellValue matrix) that stores non-zero values in consecutive blocks, whose size is (a_rows * a_ell_num_columns) ell_idx - Blocked-ELL Column indices (ellColInd) matrix, whose size is (a_rows / a_ell_blocksize) * (a_ell_num_columns / a_ell_blocksize) a_ell_blocksize - Size of the ELL-Blocks. a_ell_num_columns - Number of columns in the Blocked-Ellpack format (ellValue columns) B - Input dense matrix whose size is (a_cols * n) C/D - Output dense matrix whose size is (a_rows * n) cutlass::Status status = ellgemm_op({ {a_rows, n, a_cols}, // GemmCoord problem_size {BlockedEllA, lda}, // TensorRef<cutlass::half_t, layout::RowMajor> ref_BlockedEllA {B, ldb}, // TensorRef<cutlass::half_t, layout::ColumnMajor> ref_B, {C, ldc}, // TensorRef<float, layout::ColumnMajor> ref_C, {D, ldd}, // TensorRef<float, layout::ColumnMajor> ref_D, ell_idx, // Blocked-ELL Column indices or ellColInd matrix (const int*) a_ell_num_columns, // Columns in the Blocked-Ellpack (ellValue) matrix (int) a_ell_blocksize, // Size of the ELL-Blocks (int) a_ell_base, // Base index of ellColInd (int) - Zero or One {alpha, beta} // EpilogueOutputOp::Params epilogue_op_params }); A simplified view of the template is listed below. template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for. This is the minimum SM that /// supports the intended feature. The device kernel can be built /// targeting any SM larger than this number. 
typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages /// Access granularity of A matrix in units of elements int AlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB, /// Supports split-K with serial reduction bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Sparse matrix is A or not bool IsASparse > class EllGemm; */ template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassTensorOp, /// Tag indicating architecture to tune for typename ArchTag_ = arch::Sm80, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = typename threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// If true, kernel supports split-K with serial reduction bool SplitKSerial = false, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator, /// Sparse matrix is A or not bool IsASparse = true > class EllGemm { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = LayoutC_; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = 
TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static int const kAlignmentC = EpilogueOutputOp::kCount; static bool const kSplitKSerial = SplitKSerial; static ComplexTransform const kTransformA = ComplexTransform::kNone; static ComplexTransform const kTransformB = ComplexTransform::kNone; static bool const kIsASparse = IsASparse; /// Define the kernel using GemmKernel = typename kernel::DefaultEllGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, kSplitKSerial, Operator, kIsASparse >::GemmKernel; /// Argument structure struct Arguments { // // Data members // GemmCoord problem_size; TensorRef<ElementA const, LayoutA> ref_A; TensorRef<ElementB const, LayoutB> ref_B; TensorRef<ElementC const, LayoutC> ref_C; TensorRef<ElementC, LayoutC> ref_D; const int* ell_idx; int ell_ncol; int ell_blocksize; int ell_base_idx; typename EpilogueOutputOp::Params epilogue; int split_k_slices; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments(): problem_size(0, 0, 0), split_k_slices(1) { } /// Constructs an Arguments structure CUTLASS_HOST_DEVICE Arguments( GemmCoord problem_size_, TensorRef<ElementA const, LayoutA> ref_A_, TensorRef<ElementB const, LayoutB> ref_B_, TensorRef<ElementC const, LayoutC> ref_C_, TensorRef<ElementC, LayoutC> ref_D_, const int* ell_idx_, int ell_ncol_, int ell_blocksize_, int ell_base_idx_, typename EpilogueOutputOp::Params epilogue_ = typename EpilogueOutputOp::Params(), int split_k_slices = 1 ): problem_size(problem_size_), ref_A(ref_A_), ref_B(ref_B_), ref_C(ref_C_), ref_D(ref_D_), ell_idx(ell_idx_), ell_ncol(ell_ncol_), ell_blocksize(ell_blocksize_), ell_base_idx(ell_base_idx_), epilogue(epilogue_), split_k_slices(split_k_slices) { } }; private: /// Kernel parameters object typename GemmKernel::Params params_{}; public: /// Constructs the GEMM. EllGemm() { } /// Determines whether the GEMM can execute the given problem. 
static Status can_implement(Arguments const &args) { if (!kSplitKSerial && args.split_k_slices > 1) { return Status::kErrorInvalidProblem; } Status status = GemmKernel::can_implement( args.problem_size, args.ref_A.non_const_ref(), args.ref_B.non_const_ref(), args.ref_C.non_const_ref(), args.ref_D ); if (status != Status::kSuccess) { return status; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {args.ell_blocksize, ThreadblockShape::kN, ThreadblockShape::kK}, args.split_k_slices); tiled_shape.m() *= (args.ell_blocksize + ThreadblockShape::kM - 1 ) / ThreadblockShape::kM; if (kSplitKSerial && args.split_k_slices > 1) { bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n()); } return bytes; } Status set(Arguments const &args, cutlass::gemm::GemmCoord const &grid_shape, void *workspace){ // Initialize the Params structure params_ = typename GemmKernel::Params{ args.problem_size, grid_shape, args.ref_A.non_const_ref(), args.ref_B.non_const_ref(), args.ref_C.non_const_ref(), args.ref_D, args.ell_idx, args.ell_ncol, args.ell_blocksize, args.ell_base_idx, args.epilogue, static_cast<int *>(workspace) }; return Status::kSuccess; } /// Initializes GEMM state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {args.ell_blocksize, ThreadblockShape::kN, ThreadblockShape::kK}, args.split_k_slices); grid_shape.m() *= (args.ell_blocksize + ThreadblockShape::kM - 1 ) / ThreadblockShape::kM; if (kSplitKSerial) { if (args.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } size_t bytes = get_workspace_size(args); cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream); if (result != cudaSuccess) { return Status::kErrorInternal; } } } else { if (args.split_k_slices > 1) { return Status::kErrorInvalidProblem; } } return set(args, grid_shape, workspace); } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { if (kSplitKSerial && args.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } } params_.ref_A.reset(args.ref_A.non_const_ref().data()); params_.ref_B.reset(args.ref_B.non_const_ref().data()); params_.ref_C.reset(args.ref_C.non_const_ref().data()); params_.ref_D.reset(args.ref_D.data()); params_.output_op = args.epilogue; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(GemmKernel::kThreadCount, 1, 1); cudaError_t result; int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); if (smem_size >= (48 << 10)) { result = cudaFuncSetAttribute(Kernel<GemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_); result = cudaGetLastError(); return result == cudaSuccess ? 
Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace); if (status == Status::kSuccess) { status = run(stream); } return status; } }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for column-major output exchanges problem size and operand. template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Element type for internal accumulation typename ElementAccumulator_, /// Operator class tag typename OperatorClass_, /// Tag indicating architecture to tune for typename ArchTag_, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_, /// Warp-level tile size (concept: GemmShape) typename WarpShape_, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_, /// Epilogue output operator typename EpilogueOutputOp_, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_, /// Number of stages used in the pipelined mainloop int Stages, /// Access granularity of A matrix in units of elements int AlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB, /// If true, kernel supports split-K as a serial reduction bool SplitKSerial, /// Operation performed by GEMM typename Operator_, /// Sparse matrix is A or not bool IsASparse> class EllGemm<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, layout::ColumnMajor, // partially specialized on LayoutC ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, SplitKSerial, Operator_, IsASparse> { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = layout::ColumnMajor; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static ComplexTransform const kTransformA = ComplexTransform::kNone; static ComplexTransform const kTransformB = ComplexTransform::kNone; static bool const kSplitKSerial = SplitKSerial; static bool const kIsASparse = false; using UnderlyingOperator = EllGemm< ElementB, typename layout::LayoutTranspose<LayoutB>::type, ElementA, typename layout::LayoutTranspose<LayoutA>::type, ElementC, layout::RowMajor, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, 
ThreadblockSwizzle, Stages, kAlignmentB, kAlignmentA, SplitKSerial, Operator, kIsASparse >; using UnderlyingArguments = typename UnderlyingOperator::Arguments; using GemmKernel = typename UnderlyingOperator::GemmKernel; static int const kAlignmentC = UnderlyingOperator::kAlignmentC; /// Argument structure struct Arguments { // // Data members // GemmCoord problem_size; TensorRef<ElementA const, LayoutA> ref_A; TensorRef<ElementB const, LayoutB> ref_B; TensorRef<ElementC const, LayoutC> ref_C; TensorRef<ElementC, LayoutC> ref_D; const int* ell_idx; int ell_ncol; int ell_blocksize; int ell_base_idx; typename EpilogueOutputOp::Params epilogue; int split_k_slices; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments() { } /// Constructs an Arguments structure CUTLASS_HOST_DEVICE Arguments( GemmCoord problem_size_, TensorRef<ElementA const, LayoutA> ref_A_, TensorRef<ElementB const, LayoutB> ref_B_, TensorRef<ElementC const, LayoutC> ref_C_, TensorRef<ElementC, LayoutC> ref_D_, const int* ell_idx_, int ell_ncol_, int ell_blocksize_, int ell_base_idx_, typename EpilogueOutputOp::Params epilogue_ = typename EpilogueOutputOp::Params(), int split_k_slices = 1 ): problem_size(problem_size_), ref_A(ref_A_), ref_B(ref_B_), ref_C(ref_C_), ref_D(ref_D_), ell_idx(ell_idx_), ell_ncol(ell_ncol_), ell_blocksize(ell_blocksize_), ell_base_idx(ell_base_idx_), epilogue(epilogue_), split_k_slices(split_k_slices) { } }; private: UnderlyingOperator underlying_operator_; public: /// Constructs the GEMM. EllGemm() { } /// Helper to construct a transposed equivalent for the underying GEMM operator static UnderlyingArguments to_underlying_arguments(Arguments const &args) { return UnderlyingArguments( {args.problem_size.n(), args.problem_size.m(), args.problem_size.k()}, {args.ref_B.data(), args.ref_B.stride(0)}, {args.ref_A.data(), args.ref_A.stride(0)}, {args.ref_C.data(), args.ref_C.stride(0)}, {args.ref_D.data(), args.ref_D.stride(0)}, args.ell_idx, args.ell_ncol, args.ell_blocksize, args.ell_base_idx, args.epilogue, args.split_k_slices ); } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { return UnderlyingOperator::can_implement(to_underlying_arguments(args)); } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, args.ell_blocksize, ThreadblockShape::kK}, args.split_k_slices); tiled_shape.n() *= (args.ell_blocksize + ThreadblockShape::kN - 1 ) / ThreadblockShape::kN; if (kSplitKSerial && args.split_k_slices > 1) { bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n()); } return bytes; } Status set(Arguments const &args, cutlass::gemm::GemmCoord const &grid_shape, void *workspace){ // Initialize the Params structure return underlying_operator_.set(to_underlying_arguments(args), grid_shape, workspace); } /// Initializes GEMM state from arguments. 
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape( {args.problem_size.n(), args.problem_size.m(), args.problem_size.k()}, {ThreadblockShape::kM, args.ell_blocksize, ThreadblockShape::kK}, args.split_k_slices); grid_shape.n() *= (args.ell_blocksize + ThreadblockShape::kN - 1 ) / ThreadblockShape::kN; if (kSplitKSerial) { if (args.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } size_t bytes = get_workspace_size(args); cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream); if (result != cudaSuccess) { return Status::kErrorInternal; } } } else { if (args.split_k_slices > 1) { return Status::kErrorInvalidProblem; } } // Initialize the Params structure set(args, grid_shape, workspace); return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { return underlying_operator_.update(to_underlying_arguments(args), workspace); } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { return underlying_operator_.run(stream); } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/device/ell_gemm.h/0
{ "file_path": "cutlass/include/cutlass/gemm/device/ell_gemm.h", "repo_id": "cutlass", "token_count": 10635 }
30
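A compact host-side sketch of driving the EllGemm operator declared above, using only the default configuration parameters. The function and argument names are placeholders; the caller is assumed to have already built the Blocked-ELL value matrix and its ellColInd index matrix in device memory.

#include "cutlass/gemm/device/ell_gemm.h"

// half_t A (Blocked-ELL values) x half_t B -> float C/D. All remaining template
// parameters (arch, tile shapes, stages, alignments) take their
// DefaultGemmConfiguration values.
using EllGemmOp = cutlass::gemm::device::EllGemm<
    cutlass::half_t, cutlass::layout::RowMajor,
    cutlass::half_t, cutlass::layout::ColumnMajor,
    float,           cutlass::layout::ColumnMajor,
    float>;

cutlass::Status run_blocked_ell_gemm(
    int a_rows, int n, int a_cols,
    cutlass::half_t const* ell_values, int lda,   // packed ellValue matrix
    cutlass::half_t const* B, int ldb,
    float const* C, int ldc, float* D, int ldd,
    int const* ell_col_ind,                       // ellColInd matrix
    int a_ell_num_columns, int a_ell_blocksize, int a_ell_base,
    cudaStream_t stream = nullptr) {

  EllGemmOp ell_gemm_op;

  typename EllGemmOp::Arguments args(
      {a_rows, n, a_cols},                        // GemmCoord problem size
      {ell_values, lda}, {B, ldb}, {C, ldc}, {D, ldd},
      ell_col_ind, a_ell_num_columns, a_ell_blocksize, a_ell_base,
      {1.0f, 0.0f});                              // alpha, beta

  cutlass::Status status = EllGemmOp::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  // No workspace is needed while split_k_slices stays at its default of 1.
  return ell_gemm_op(args, /*workspace=*/nullptr, stream);
}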
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Problem visitor for grouped GEMMs */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" #include "cutlass/layout/matrix.h" #include "cutlass/trace.h" #include "cutlass/gemm/kernel/gemm_transpose_operands.h" #include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform bool Transposed = false > struct GemmGrouped { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_; static bool const kTransposed = Transposed; // Optional transpose using MapArguments = kernel::detail::MapArguments< typename Mma::IteratorA::Element, typename Mma::IteratorA::Layout, Mma::kTransformA, Mma::IteratorA::AccessType::kElements, typename Mma::IteratorB::Element, typename Mma::IteratorB::Layout, Mma::kTransformB, Mma::IteratorB::AccessType::kElements, typename Mma::LayoutC, kTransposed >; // Public-facing type definitions related to operand element type, layout, and complex conjugate // operation. 
Must interact with the 'kTransposed' notion. using ElementA = typename MapArguments::ElementA; using LayoutA = typename MapArguments::LayoutA; using ElementB = typename MapArguments::ElementB; using LayoutB = typename MapArguments::LayoutB; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename MapArguments::LayoutC; static ComplexTransform const kTransformA = MapArguments::kTransformA; static ComplexTransform const kTransformB = MapArguments::kTransformB; // Type definitions about the mainloop. using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = MapArguments::kAlignmentA; static int const kAlignmentB = MapArguments::kAlignmentB; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using ProblemVisitor = GemmGroupedProblemVisitor< ThreadblockShape, kGroupScheduleMode, kThreadCount, kThreadCount, kTransposed>; // // Structures // /// Argument structure struct Arguments { // // Data members // GemmCoord *problem_sizes{nullptr}; int problem_count{0}; int threadblock_count{0}; typename EpilogueOutputOp::Params output_op{}; ElementA ** ptr_A{nullptr}; ElementB ** ptr_B{nullptr}; ElementC ** ptr_C{nullptr}; ElementC ** ptr_D{nullptr}; typename LayoutA::Stride::LongIndex *lda{nullptr}; typename LayoutB::Stride::LongIndex *ldb{nullptr}; typename LayoutC::Stride::LongIndex *ldc{nullptr}; typename LayoutC::Stride::LongIndex *ldd{nullptr}; // Only used by device-level operator GemmCoord *host_problem_sizes{nullptr}; // // Methods // /// Default ctor Arguments() = default; /// Ctor CUTLASS_HOST_DEVICE Arguments( GemmCoord *problem_sizes, int problem_count, int threadblock_count, typename EpilogueOutputOp::Params output_op, ElementA ** ptr_A, ElementB ** ptr_B, ElementC ** ptr_C, ElementC ** ptr_D, typename LayoutA::Stride::LongIndex *lda, typename LayoutB::Stride::LongIndex *ldb, typename LayoutC::Stride::LongIndex *ldc, typename LayoutC::Stride::LongIndex *ldd, GemmCoord *host_problem_sizes=nullptr ): problem_sizes(problem_sizes), problem_count(problem_count), threadblock_count(threadblock_count), output_op(output_op), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), host_problem_sizes(host_problem_sizes) { } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params { typename ProblemVisitor::Params problem_visitor{}; int threadblock_count{0}; typename EpilogueOutputOp::Params output_op{}; ElementA ** ptr_A{nullptr}; ElementB ** ptr_B{nullptr}; ElementC ** ptr_C{nullptr}; ElementC ** ptr_D{nullptr}; typename LayoutA::Stride::LongIndex *lda{nullptr}; typename LayoutB::Stride::LongIndex *ldb{nullptr}; typename LayoutC::Stride::LongIndex *ldc{nullptr}; typename LayoutC::Stride::LongIndex *ldd{nullptr}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params(Arguments const &args, void *workspace = nullptr, int tile_count = 0): problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count), threadblock_count(args.threadblock_count), 
output_op(args.output_op), ptr_A(args.ptr_A), ptr_B(args.ptr_B), ptr_C(args.ptr_C), ptr_D(args.ptr_D), lda(args.lda), ldb(args.ldb), ldc(args.ldc), ldd(args.ldd) { } CUTLASS_HOST_DEVICE void update( Arguments const &args, void *workspace = nullptr, int tile_count = 0) { problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, workspace, tile_count); threadblock_count = args.threadblock_count; output_op = args.output_op; ptr_A = args.ptr_A; ptr_B = args.ptr_B; ptr_C = args.ptr_C; ptr_D = args.ptr_D; lda = args.lda; ldb = args.ldb; ldc = args.ldc; ldd = args.ldd; } }; /// Shared memory storage structure struct SharedStorage { union { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; } kernel; // ProblemVisitor shared storage can't be overlapped with others typename ProblemVisitor::SharedStorage problem_visitor; }; public: // // Methods // CUTLASS_DEVICE GemmGrouped() { } /// Determines whether kernel satisfies alignment static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) { return Status::kSuccess; } static Status can_implement(Arguments const &args) { return Status::kSuccess; } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // // These types shadow the type-level definitions and support the ability to implement // a 'transposed' GEMM that computes the transposed problems. // using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; // // Problem visitor. // ProblemVisitor problem_visitor( params.problem_visitor, shared_storage.problem_visitor, blockIdx.x); // Outer 'persistent' loop to iterate over tiles while (problem_visitor.next_tile()) { GemmCoord problem_size = problem_visitor.problem_size(); int32_t problem_idx = problem_visitor.problem_index(); int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); cutlass::gemm::GemmCoord threadblock_offset( int(threadblock_idx / grid_shape.n()) * Mma::Shape::kM, int(threadblock_idx % grid_shape.n()) * Mma::Shape::kN, 0); // Load element pointers. Exchange pointers and strides if working on the transpose ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx])); typename LayoutA::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]); ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx])); typename LayoutB::LongIndex ldm_B = (kTransposed ? 
params.lda[problem_idx] : params.ldb[problem_idx]); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_offset.m(), 0, }; cutlass::MatrixCoord tb_offset_B{ 0, threadblock_offset.n() }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( LayoutA(ldm_A), ptr_A, {problem_size.m(), problem_size.k()}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( LayoutB(ldm_B), ptr_B, {problem_size.k(), problem_size.n()}, thread_idx, tb_offset_B); typename Mma::FragmentC accumulators; accumulators.clear(); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Matrix multiply phase // // Construct thread-scoped matrix multiply Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; // Wait for all threads to finish their epilogue phases from the previous tile. __syncthreads(); // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); ElementC *ptr_C = params.ptr_C[problem_idx]; ElementC *ptr_D = params.ptr_D[problem_idx]; LayoutC layout_C(params.ldc[problem_idx]); LayoutC layout_D(params.ldd[problem_idx]); typename Epilogue::OutputTileIterator::Params params_C(layout_C); typename Epilogue::OutputTileIterator::Params params_D(layout_D); // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params_C, ptr_C, problem_size.mn(), thread_idx, threadblock_offset.mn() ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params_D, ptr_D, problem_size.mn(), thread_idx, threadblock_offset.mn() ); Epilogue epilogue( shared_storage.kernel.epilogue, thread_idx, warp_idx, lane_idx); // Execute the epilogue operator to update the destination tensor. epilogue( output_op, iterator_D, accumulators, iterator_C); // Next tile problem_visitor.advance(gridDim.x); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/gemm_grouped.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/gemm_grouped.h", "repo_id": "cutlass", "token_count": 5488 }
31
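The grouped kernel above consumes per-group arrays of problem sizes, operand pointers, and leading dimensions that must reside in device memory. A hedged sketch of assembling its Arguments follows; GroupedKernel stands for a concrete instantiation of GemmGrouped (for example one produced by kernel::DefaultGemmGrouped), and the helper assumes all arrays have already been copied to the device.

// Sketch only: builds kernel-level Arguments from prepared device arrays.
// threadblock_count is typically chosen as (#SMs x occupancy) so the
// persistent problem visitor can keep the whole GPU busy.
#include "cutlass/gemm_coord.h"

template <typename GroupedKernel>
typename GroupedKernel::Arguments make_grouped_arguments(
    cutlass::gemm::GemmCoord* problem_sizes_device,   // [problem_count], device memory
    int problem_count,
    int threadblock_count,
    typename GroupedKernel::ElementA** ptr_A,         // [problem_count] each, device memory
    typename GroupedKernel::ElementB** ptr_B,
    typename GroupedKernel::ElementC** ptr_C,
    typename GroupedKernel::ElementC** ptr_D,
    int64_t* lda, int64_t* ldb, int64_t* ldc, int64_t* ldd,
    cutlass::gemm::GemmCoord* problem_sizes_host = nullptr) {

  using OutputOpParams = typename GroupedKernel::EpilogueOutputOp::Params;

  return typename GroupedKernel::Arguments(
      problem_sizes_device, problem_count, threadblock_count,
      OutputOpParams(1.0f, 0.0f),        // alpha, beta (assumes a LinearCombination-style epilogue)
      ptr_A, ptr_B, ptr_C, ptr_D,
      lda, ldb, ldc, ldd,
      problem_sizes_host);               // host copy is only needed by the device-level wrapper
}

In practice this kernel is launched through the device-level grouped GEMM operator, which also computes a suitable threadblock_count and allocates whatever workspace the chosen GroupScheduleMode requires.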
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Gemm kernel with an epilogue that computes the absolute maximum value of the output and a pre-activation-function auxiliary output. The auxiliary output is also (optionally) stored to global memory. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/layout/layout.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" #include "cutlass/gemm/kernel/params_universal_base.h" #include "cutlass/trace.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// // Gemm that computes the absolute maximum value of the output and a pre-activation-function // auxiliary output. template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > struct GemmWithAbsMax { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Split-K preserves splits that are 128b aligned static int const kSplitKAlignment = const_max( 128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value ); // // Structures // /// Argument structure struct Arguments : UniversalArgumentsBase { // // Data members // typename EpilogueOutputOp::Params epilogue; void const * ptr_A; void const * ptr_B; void const * ptr_C; void * ptr_D; void * ptr_Aux; void * ptr_Vector; int64_t batch_stride_A; int64_t batch_stride_B; int64_t batch_stride_C; int64_t batch_stride_Vector; typename LayoutA::Stride::Index lda; typename LayoutB::Stride::Index ldb; typename LayoutC::Stride::Index ldc; typename LayoutC::Stride::Index ldd; typename LayoutC::Stride::Index ldaux; typename LayoutC::Stride::Index ldr; // // Methods // Arguments(): ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), ptr_Aux(nullptr) {} /// Constructs an arguments structure with ldaux Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, void * ptr_Aux, void * ptr_Vector, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, int64_t batch_stride_Vector, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, typename LayoutC::Stride::Index ldr, typename LayoutC::Stride::Index ldaux) : UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_Aux(ptr_Aux), ptr_Vector(ptr_Vector), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_Vector(batch_stride_Vector), lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldaux(ldaux), ldr(ldr) { } /// Constructs an Arguments structure without ldaux. /// These parameters are overridden with D batch stride and ldd. 
Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, void * ptr_Aux, void * ptr_Vector, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, int64_t batch_stride_Vector, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, typename LayoutC::Stride::Index ldr) : Arguments(mode, problem_size, batch_count, epilogue, ptr_A, ptr_B, ptr_C, ptr_D, ptr_Aux, ptr_Vector, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D, batch_stride_Vector, lda, ldb, ldc, ldd, ldr, ldd) { } /// Returns arguments for the transposed problem Arguments transposed_problem() const { Arguments args(*this); std::swap(args.problem_size.m(), args.problem_size.n()); std::swap(args.ptr_A, args.ptr_B); std::swap(args.lda, args.ldb); std::swap(args.batch_stride_A, args.batch_stride_B); return args; } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params : UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB> { using ParamsBase = UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB>; // // Data members // typename Mma::IteratorA::Params params_A; typename Mma::IteratorB::Params params_B; typename Epilogue::OutputTileIterator::Params params_C; typename Epilogue::OutputTileIterator::Params params_D; typename Epilogue::AuxOutputTileIterator::Params params_Aux; typename EpilogueOutputOp::Params output_op; void * ptr_A; void * ptr_B; void * ptr_C; void * ptr_D; void * ptr_Aux; void * ptr_Vector; typename LayoutC::Stride::Index ldr; int64_t batch_stride_A; int64_t batch_stride_B; int64_t batch_stride_C; int64_t batch_stride_Vector; // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : ParamsBase(args, device_sms, sm_occupancy), params_A(args.lda), params_B(args.ldb), params_C(args.ldc), params_D(args.ldd), params_Aux(args.ldaux), output_op(args.epilogue), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(args.ptr_D), ptr_Aux(args.ptr_Aux), ptr_Vector(args.ptr_Vector), ldr(args.ldr), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C(args.batch_stride_C), batch_stride_Vector(args.batch_stride_Vector) { } /// Lightweight update given a subset of arguments. 
CUTLASS_HOST_DEVICE void update(Arguments const &args) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; ptr_Aux = args.ptr_Aux; ptr_Vector = args.ptr_Vector; ldr = args.ldr; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_C = args.batch_stride_C; this->batch_stride_D = args.batch_stride_D; batch_stride_Vector = args.batch_stride_Vector; output_op = args.epilogue; } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Host dispatch API // /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; bool isAMisaligned = false; bool isBMisaligned = false; bool isCMisaligned = false; if (platform::is_same<LayoutA, layout::RowMajor>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) { isAMisaligned = problem_size.m() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } if (platform::is_same<LayoutB, layout::RowMajor>::value) { isBMisaligned = problem_size.n() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } if (platform::is_same<LayoutC, layout::RowMajor>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) { isCMisaligned = problem_size.m() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } if (isAMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); return Status::kErrorMisalignedOperand; } if (isBMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); return Status::kErrorMisalignedOperand; } if (isCMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); return Status::kErrorMisalignedOperand; } CUTLASS_TRACE_HOST(" returning kSuccess"); return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmWithAbsMax op; op(params, shared_storage); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= 
threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. // if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } else if (params.mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); typename Epilogue::ElementAuxOutput *ptr_Aux = static_cast<typename Epilogue::ElementAuxOutput *>(params.ptr_Aux); typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector); // // Fetch pointers based on mode. // // // Special path when split-K not enabled. // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() == 1) { // Tile iterators loading from source tensors. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. 
typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to auxiliary tensor. typename Epilogue::AuxOutputTileIterator iterator_Aux( params.params_Aux, ptr_Aux, params.problem_size.mn(), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr; } // Execute the epilogue operator to update the destination tensor. epilogue(output_op, ptr_Vector, iterator_D, accumulators, iterator_C, iterator_Aux, params.problem_size.mn(), threadblock_offset); return; } // // Slower path when split-K or batching is needed // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; if (ptr_Aux) { ptr_Aux += threadblock_tile_offset.k() * params.batch_stride_D; } if (ptr_Vector) { ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector; } } else if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; if (ptr_Aux) { ptr_Aux = static_cast<typename Epilogue::ElementAuxOutput * const *>(params.ptr_Aux)[threadblock_tile_offset.k()]; } if (ptr_Vector) { ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[threadblock_tile_offset.k()]; } } // Tile iterators loading from source tensors. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to auxiliary destination tensor. typename Epilogue::AuxOutputTileIterator iterator_Aux( params.params_Aux, // Only the final block writes the auxiliary tensor ((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) && (params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1)) ? nullptr : ptr_Aux, params.problem_size.mn(), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. 
if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); } // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr; } // Execute the epilogue operator to update the destination tensor. epilogue(output_op, // Only the final block uses Vector ((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) && (params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1)) ? nullptr : ptr_Vector, iterator_D, accumulators, iterator_C, iterator_Aux, params.problem_size.mn(), threadblock_offset); // // Release the semaphore // if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/gemm_with_absmax.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/gemm_with_absmax.h", "repo_id": "cutlass", "token_count": 9516 }
32
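The split-K path in gemm_with_absmax.h above serializes the reduction of partial sums across the threadblocks that share one output tile: partition k waits until the semaphore reads k, treats the previously written D tile as its source C, accumulates its contribution, and then releases k+1 (or resets the semaphore to 0 if it is the final partition). Below is a minimal host-side sketch of that hand-off, using std::atomic and std::thread as stand-ins for cutlass::Semaphore and the threadblocks; the tile size and partial values are illustrative, not CUTLASS APIs.

// Host-side sketch of the split-K serial-reduction hand-off used above.
// std::atomic<int> stands in for cutlass::Semaphore; each std::thread
// stands in for one threadblock working on the same output tile.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  constexpr int kPartitionsK = 4;   // grid_tiled_shape.k()
  constexpr int kTileElems   = 8;   // one output tile, flattened

  std::atomic<int> semaphore{0};            // params.semaphore + block_idx
  std::vector<float> D(kTileElems, 0.0f);   // destination tile

  auto partition = [&](int k) {
    // Each partition's partial sum for the tile (placeholder values).
    std::vector<float> partial(kTileElems, float(k + 1));

    // semaphore.wait(k): spin until it is this partition's turn.
    while (semaphore.load(std::memory_order_acquire) != k) { }

    // Partitions after the first read the running sum from D (iterator_C = iterator_D).
    for (int i = 0; i < kTileElems; ++i) {
      D[i] = (k == 0 ? 0.0f : D[i]) + partial[i];
    }

    // semaphore.release(lock): the final partition resets to 0, others pass the baton.
    int lock = (k + 1 == kPartitionsK) ? 0 : k + 1;
    semaphore.store(lock, std::memory_order_release);
  };

  std::vector<std::thread> blocks;
  for (int k = 0; k < kPartitionsK; ++k) blocks.emplace_back(partition, k);
  for (auto &t : blocks) t.join();

  std::printf("D[0] = %f (expected %f)\n", D[0], 1.0f + 2.0f + 3.0f + 4.0f);
  return 0;
}

The real kernel additionally gates the auxiliary tensor, the broadcast vector, and the absmax outputs so that only the final partition writes them; the sketch omits those details.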
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/kernel_hardware_info.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/reg_reconfig.h" #include "cutlass/arch/mma_sm90.h" #include "cutlass/epilogue/collective/detail.hpp" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/trace.h" #include "cute/tensor.hpp" /////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::kernel { /////////////////////////////////////////////////////////////////////////////// template < class ProblemShape_, class CollectiveMainloop_, class CollectiveEpilogue_, class TileScheduler_ > class GemmUniversal< ProblemShape_, CollectiveMainloop_, CollectiveEpilogue_, TileScheduler_, cute::enable_if_t<cute::is_base_of_v<KernelTmaWarpSpecialized, typename CollectiveMainloop_::DispatchPolicy::Schedule>>> { public: // // Type Aliases // using ProblemShape = ProblemShape_; static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, "ProblemShape{} should be <M,N,K> or <M,N,K,L>"); // Mainloop derived types using CollectiveMainloop = CollectiveMainloop_; using TileShape = typename CollectiveMainloop::TileShape; using TiledMma = typename CollectiveMainloop::TiledMma; using ArchTag = typename CollectiveMainloop::ArchTag; using ElementA = typename CollectiveMainloop::ElementA; using StrideA = typename CollectiveMainloop::StrideA; using ElementB = typename CollectiveMainloop::ElementB; using StrideB = typename CollectiveMainloop::StrideB; using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; 
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; using ClusterShape = typename DispatchPolicy::ClusterShape; using MainloopArguments = typename CollectiveMainloop::Arguments; using MainloopParams = typename CollectiveMainloop::Params; static_assert(ArchTag::kMinComputeCapability >= 90); // Epilogue derived types using CollectiveEpilogue = CollectiveEpilogue_; using ElementC = typename CollectiveEpilogue::ElementC; using StrideC = typename CollectiveEpilogue::StrideC; using ElementD = typename CollectiveEpilogue::ElementD; using StrideD = typename CollectiveEpilogue::StrideD; using EpilogueArguments = typename CollectiveEpilogue::Arguments; using EpilogueParams = typename CollectiveEpilogue::Params; static_assert(cute::is_void_v<TileScheduler_> or cute::is_same_v<TileScheduler_, PersistentScheduler>, "TMA warp-specialized kernel does not support specializing the tile scheduler."); using TileSchedulerTag = TileScheduler_; using TileScheduler = typename detail::TileSchedulerSelector< TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; using TileSchedulerArguments = typename TileScheduler::Arguments; // Kernel level shared memory storage struct SharedStorage { // Mainloop and epilogue don't use smem concurrently since kernel is non-persistent, so we can use a union union TensorStorage { using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; MainloopTensorStorage mainloop; EpilogueTensorStorage epilogue; } tensors; struct PipelineStorage : cute::aligned_struct<16> { using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; alignas(16) MainloopPipelineStorage mainloop; alignas(16) EpiLoadPipelineStorage epi_load; } pipelines; }; static constexpr int SharedStorageSize = sizeof(SharedStorage); static constexpr uint32_t NumLoadWarpGroups = 1; static constexpr uint32_t NumMmaWarpGroups = 1; static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMma{})) + (NumLoadWarpGroups * NumThreadsPerWarpGroup); static constexpr uint32_t MinBlocksPerMultiprocessor = 1; // Device side arguments struct Arguments { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopArguments mainloop{}; EpilogueArguments epilogue{}; KernelHardwareInfo hw_info{}; TileSchedulerArguments scheduler{}; }; // Kernel entry point API struct Params { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopParams mainloop{}; EpilogueParams epilogue{}; }; // // Methods // // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
static Params to_underlying_arguments(Arguments const& args, void* workspace) { (void) workspace; auto problem_shape = args.problem_shape; if constexpr (detail::IF_SWAP_AB<CollectiveMainloop>::value) { // swap M/N get<0>(problem_shape) = get<1>(args.problem_shape); get<1>(problem_shape) = get<0>(args.problem_shape); } return { args.mode, problem_shape, CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) }; } CUTLASS_HOST_DEVICE static bool can_implement(Arguments const& args) { bool implementable = (args.mode == GemmUniversalMode::kGemm) or (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); return implementable; } implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); implementable &= TileScheduler::can_implement(args.scheduler); return implementable; } static size_t get_workspace_size(Arguments const& args) { return 0; } static cutlass::Status initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter* cuda_adapter = nullptr) { return Status::kSuccess; } // Computes the kernel launch grid shape based on runtime parameters static dim3 get_grid_shape(Params const& params) { auto cluster_shape = ClusterShape{}; auto tile_shape = TileShape{}; auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); return TileScheduler::get_tiled_cta_shape_mnl( problem_shape_MNKL, tile_shape, cluster_shape); } static dim3 get_block_shape() { return dim3(MaxThreadsPerBlock, 1, 1); } CUTLASS_DEVICE void operator()(Params const& params, char* smem_buf) { using namespace cute; using X = Underscore; // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. 
Aborting.\n"); #else enum class WarpGroupRole { Producer = 0, Consumer = 1, }; enum class ProducerWarpRole { MainloopEpilogue = 0, Warp1 = 1, Warp2 = 2, Warp3 = 3 }; // Kernel level shared memory storage SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf); int thread_idx = int(threadIdx.x); int lane_idx = canonical_lane_idx(); int warp_idx = canonical_warp_idx_sync(); int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); int lane_predicate = cute::elect_one_sync(); uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); // Issue Tma Descriptor Prefetch from a single thread if ((warp_idx == 0) && lane_predicate) { CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); } // Mainloop Load pipeline using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; typename MainloopPipeline::Params mainloop_pipeline_params; if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) { mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; } if (warp_group_role == WarpGroupRole::Consumer) { mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; } mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; mainloop_pipeline_params.num_consumers = NumThreadsPerWarpGroup; mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params, ClusterShape{}); // Epilogue Load pipeline using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; typename EpiLoadPipeline::Params epi_load_pipeline_params; if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) { epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; } if (warp_group_role == WarpGroupRole::Consumer) { epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; } epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); // Epilogue Store pipeline using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; typename EpiStorePipeline::Params epi_store_pipeline_params; epi_store_pipeline_params.always_wait = true; EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); // Initialize starting pipeline states for the collectives // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; // For the DMA Load (producer) we start with an opposite phase // i.e., we skip all waits since we know that the buffer is indeed empty PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>(); PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>(); 
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>(); auto cluster_wait_fn = [&] () { // We need this to guarantee that the Pipeline init is visible // To all producers and consumer thread blocks in the Cluster if constexpr (size(ClusterShape{}) > 1) { cute::cluster_arrive_relaxed(); return [] () { cute::cluster_wait(); }; } else { __syncthreads(); return [] () {}; // do nothing } } (); // Preconditions static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); // Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK) auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); // Get the appropriate blocks for this thread block -- potential for thread block locality auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) TiledMma tiled_mma; // In a warp specialized kernel, collectives expose data movement and compute operations separately CollectiveMainloop collective_mainloop; CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); // Prepare and partition the input tensors. Expects a tuple of tensors where: // get<0>(load_inputs) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l) // get<1>(load_inputs) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l) auto load_inputs = collective_mainloop.load_init(problem_shape_MNKL, params.mainloop); static_assert(cute::tuple_size_v<decltype(load_inputs)> >= 2, "Output of load_init must have at least two elements (A, B)"); // Extract out partitioned A and B. 
Tensor gA_mkl = get<0>(load_inputs); Tensor gB_nkl = get<1>(load_inputs); // Compute m_coord, n_coord, and l_coord with their post-tiled shapes auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl)); auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl)); auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl)); auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); // Get pipeline iterators and increments from tensor shapes auto k_tile_iter = cute::make_coord_iterator(shape<3>(gA_mkl)); auto k_tile_count = size<3>(gA_mkl); // Wait for all thread blocks in the Cluster cluster_wait_fn(); if (warp_group_role == WarpGroupRole::Producer) { if (producer_warp_role == ProducerWarpRole::MainloopEpilogue) { collective_mainloop.load( params.mainloop, mainloop_pipeline, mainloop_pipe_producer_state, load_inputs, blk_coord, k_tile_iter, k_tile_count, lane_idx, block_rank_in_cluster, shared_storage.tensors.mainloop ); // Update starting mainloop pipeline state for the pipeline drain mainloop_pipe_producer_state.advance(k_tile_count); // Make sure mainloop consumer has been waited upon before issuing epilogue load collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); if (collective_epilogue.is_producer_load_needed()) { // Ensure warp is converged before issuing epilogue loads __syncwarp(); epi_load_pipe_producer_state = collective_epilogue.load( epi_load_pipeline, epi_load_pipe_producer_state, problem_shape_MNKL, blk_shape, blk_coord, tiled_mma, lane_idx, shared_storage.tensors.epilogue ); collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); } } } else if (warp_group_role == WarpGroupRole::Consumer) { Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) collective_mainloop.mma( mainloop_pipeline, mainloop_pipe_consumer_state, accumulators, k_tile_count, warp_group_thread_idx, shared_storage.tensors.mainloop, params.mainloop ); // Make sure the math instructions are done and free buffers before entering the epilogue collective_mainloop.mma_tail( mainloop_pipeline, mainloop_pipe_consumer_state, k_tile_count ); // Epilogue and write to gD auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = collective_epilogue.store( epi_load_pipeline, epi_load_pipe_consumer_state, epi_store_pipeline, epi_store_pipe_producer_state, problem_shape_MNKL, blk_shape, blk_coord, accumulators, tiled_mma, warp_group_thread_idx, shared_storage.tensors.epilogue ); collective_epilogue.store_tail( epi_load_pipeline, epi_load_pipe_consumer_state_next, epi_store_pipeline, epi_store_pipe_producer_state_next ); } #endif } }; /////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::kernel
cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp", "repo_id": "cutlass", "token_count": 6804 }
33
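The SM90 TMA warp-specialized kernel above assigns whole warp groups to either the producer (TMA load) or consumer (WGMMA plus epilogue) path, and further assigns the first producer warp to issue the mainloop and epilogue loads. A small host-side sketch of that index-to-role mapping follows, assuming 128 threads per warp group and 32 threads per warp as in the kernel; the loop over thread indices merely emulates what canonical_warp_idx_sync() and canonical_warp_group_idx() report on the device, and everything outside the two enums is illustrative.

// Host-side sketch of the producer/consumer role mapping used above.
// The enum values mirror WarpGroupRole / ProducerWarpRole in the kernel.
#include <cstdio>

enum class WarpGroupRole { Producer = 0, Consumer = 1 };
enum class ProducerWarpRole { MainloopEpilogue = 0, Warp1 = 1, Warp2 = 2, Warp3 = 3 };

int main() {
  constexpr int kThreadsPerWarp      = 32;
  constexpr int kThreadsPerWarpGroup = 128;
  constexpr int kWarpsPerWarpGroup   = kThreadsPerWarpGroup / kThreadsPerWarp;
  constexpr int kBlockThreads        = 2 * kThreadsPerWarpGroup;  // 1 producer + 1 consumer warp group

  for (int tid = 0; tid < kBlockThreads; tid += kThreadsPerWarp) {
    int warp_idx               = tid / kThreadsPerWarp;       // canonical_warp_idx_sync()
    int warp_group_idx         = tid / kThreadsPerWarpGroup;  // canonical_warp_group_idx()
    int warp_idx_in_warp_group = warp_idx % kWarpsPerWarpGroup;

    auto group_role    = WarpGroupRole(warp_group_idx);
    auto producer_role = ProducerWarpRole(warp_idx_in_warp_group);

    bool issues_mainloop_tma =
        group_role == WarpGroupRole::Producer &&
        producer_role == ProducerWarpRole::MainloopEpilogue;

    std::printf("warp %d: %s%s\n", warp_idx,
                group_role == WarpGroupRole::Producer ? "producer" : "consumer",
                issues_mainloop_tma ? " (mainloop/epilogue TMA warp)" : "");
  }
  return 0;
}

Only the elected producer warp drives the TMA loads and semaphore-free pipeline commits; the consumer warp group of size(TiledMma{}) threads runs the MMA and epilogue, which is why the kernel sizes MaxThreadsPerBlock from those two quantities.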
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/blas3.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" #include "cutlass/core_io.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight) FillMode FillMode_, ///! Fill Mode for triangular matrix (kLower or kUpper) DiagType DiagType_ ///! 
Diag Type for triangular matrix (kNonUnit or kUnit) > struct TrmmUniversal { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; static SideMode const kSideMode = SideMode_; static FillMode const kFillMode = FillMode_; static DiagType const kDiagType = DiagType_; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Split-K preserves splits that are 128b aligned static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value); // // Structures // /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode{GemmUniversalMode::kGemm}; GemmCoord problem_size{}; int batch_count{1}; typename EpilogueOutputOp::Params epilogue{}; void const * ptr_A{nullptr}; void const * ptr_B{nullptr}; void * ptr_D{nullptr}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; int64_t batch_stride_D{0}; typename LayoutA::Stride::Index lda{0}; typename LayoutB::Stride::Index ldb{0}; typename LayoutC::Stride::Index ldd{0}; // // Methods // Arguments() = default; /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void * ptr_D, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_D, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldd ): mode(mode), problem_size(problem_size), batch_count(batch_count), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_D(ptr_D), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_D(batch_stride_D), lda(lda), ldb(ldb), ldd(ldd) { } /// Returns arguments for the transposed problem sizes Arguments transposed_problem_size() const { Arguments args(*this); std::swap(args.problem_size.m(), args.problem_size.n()); return args; } /// Returns arguments for the transposed matrices Arguments swapped_matrices() const { Arguments args(*this); std::swap(args.ptr_A, args.ptr_B); std::swap(args.lda, args.ldb); std::swap(args.batch_stride_A, args.batch_stride_B); return args; } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int 
swizzle_log_tile{0}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorB::Params params_B{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename EpilogueOutputOp::Params output_op{}; GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; int batch_count {0}; int gemm_k_size {0}; void * ptr_A{nullptr}; void * ptr_B{nullptr}; void * ptr_D{nullptr}; int64_t batch_stride_A {0}; int64_t batch_stride_B {0}; int64_t batch_stride_D {0}; int *semaphore{nullptr}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params( Arguments const &args, cutlass::gemm::GemmCoord const & grid_tiled_shape, int gemm_k_size, void *workspace = nullptr ): problem_size(args.problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(args.lda), params_B(args.ldb), params_D(args.ldd), output_op(args.epilogue), mode(args.mode), batch_count(args.batch_count), gemm_k_size(gemm_k_size), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_D(args.ptr_D), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_D(args.batch_stride_D), semaphore(static_cast<int *>(workspace)) { } CUTLASS_HOST_DEVICE void update( Arguments const &args, void *workspace = nullptr) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_D = args.ptr_D; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_D = args.batch_stride_D; output_op = args.epilogue; semaphore = static_cast<int *>(workspace); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Methods // CUTLASS_DEVICE TrmmUniversal() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. 
// if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } else if (params.mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; /****************************************************************************************************** First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other - (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations needed to process all elements till that coordinate. - (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations needed to process all elements till that coordinate. Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other - (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations that can be skipped for all elements of this tile. - (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations that can be skipped for all elements of this tile. 
********************************************************************************************************/ if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kLower) { int k_iterations_till_diagonal = ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM + Mma::Shape::kK - 1) / Mma::Shape::kK; if (k_iterations_till_diagonal < gemm_k_iterations) { gemm_k_iterations = k_iterations_till_diagonal; } } else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kUpper) { int k_iterations_till_diagonal = ((threadblock_tile_offset.n() + 1) * Mma::Shape::kN + Mma::Shape::kK - 1) / Mma::Shape::kK; if (k_iterations_till_diagonal < gemm_k_iterations) { gemm_k_iterations = k_iterations_till_diagonal; } } else if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kUpper) { int k_iterations_till_diagonal = ((threadblock_tile_offset.m()) * Mma::Shape::kM) / Mma::Shape::kK; if (k_iterations_till_diagonal != 0) { tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK}); tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0}); gemm_k_iterations -= k_iterations_till_diagonal; } } else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kLower) { int k_iterations_till_diagonal = ((threadblock_tile_offset.n()) * Mma::Shape::kN) / Mma::Shape::kK; if (k_iterations_till_diagonal != 0) { tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK}); tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0}); gemm_k_iterations -= k_iterations_till_diagonal; } } // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); // // Fetch pointers based on mode. // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kArray) { ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; } // Tile iterator loading from source tensor (although irrelevant to this kernel as beta is zero). typename Epilogue::OutputTileIterator iterator_C( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. epilogue( output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/trmm_universal.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/trmm_universal.h", "repo_id": "cutlass", "token_count": 7375 }
34
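TrmmUniversal above trims the GEMM k loop by exploiting the triangular structure of the A operand: for (Left, Lower) and (Right, Upper) the loop is clamped at the tile's diagonal, while for (Left, Upper) and (Right, Lower) it starts past the diagonal by advancing tb_offset_A and tb_offset_B. The host-side sketch below reproduces that arithmetic for the two Left-side cases with an illustrative 128x128x32 threadblock shape and K = 512; only the formulas are taken from the kernel.

// Host-side sketch of the k-iteration trimming in TrmmUniversal above,
// for the (Left, Lower) and (Left, Upper) cases. Threadblock shape and
// problem size are illustrative; the formulas mirror the kernel.
#include <cstdio>

int main() {
  constexpr int kM = 128, kK = 32;     // Mma::Shape::kM, Mma::Shape::kK
  constexpr int problem_k = 512;       // problem_size.k()
  int full_iterations = (problem_k + kK - 1) / kK;  // gemm_k_iterations before trimming

  for (int tile_m = 0; tile_m < 4; ++tile_m) {
    // (Left, Lower): only k tiles up to and including the diagonal contribute.
    int stop_at_diagonal = ((tile_m + 1) * kM + kK - 1) / kK;
    int lower_iterations = stop_at_diagonal < full_iterations ? stop_at_diagonal
                                                              : full_iterations;

    // (Left, Upper): k tiles strictly before the diagonal can be skipped entirely.
    int skip_before_diagonal = (tile_m * kM) / kK;
    int upper_iterations     = full_iterations - skip_before_diagonal;
    int k_offset_elements    = skip_before_diagonal * kK;  // added to tb_offset_A / tb_offset_B

    std::printf("tile_m=%d: lower runs %2d iters, upper skips %2d tiles (offset %3d elems), runs %2d\n",
                tile_m, lower_iterations, skip_before_diagonal, k_offset_elements,
                upper_iterations);
  }
  return 0;
}

Running it shows the complementary pattern: CTA rows near the top need only a few k iterations in the lower-triangular case but nearly all of them in the upper-triangular case, and vice versa for rows near the bottom.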