Dataset columns (name, type, observed range):

  file_path           string    length 20 – 207
  content             string    length 5 – 3.85M
  size                int64     5 – 3.85M
  lang                string    9 classes
  avg_line_length     float64   1.33 – 100
  max_line_length     int64     4 – 993
  alphanum_fraction   float64   0.26 – 0.93
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_optimized.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile) matrix from memory. This iterator assumes TensorNHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/conv/threadblock/conv3d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_, conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity > class Conv3dDgradFilterTileAccessIteratorOptimized { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNDHWC; using ThreadMap = ThreadMap_; using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized; static StrideSupport const kStrideSupport = StrideSupport_; static int const kConvDim = 3; using ConvProblemSize = typename conv::Conv3dProblemSize; static int const kAccessesPerVector = 1; // // Parameters structure // struct Params : Conv3dDgradFilterIteratorOptimizedParams { // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Conv3dDgradFilterIteratorOptimizedParams const &base): Conv3dDgradFilterIteratorOptimizedParams(base) { } CUTLASS_HOST_DEVICE Params( Conv3dProblemSize const &problem_size, Layout const &layout ): Conv3dDgradFilterIteratorOptimizedParams( problem_size, layout, sizeof_bits<Element>::value, {Shape::kRow, Shape::kColumn}, ThreadMap::kThreads, ThreadMap::kElementsPerAccess, {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided} ) { } }; private: Conv3dDgradFilterIteratorOptimizedParams const &params_; Conv3dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; char const *pointer_; uint32_t predicates_; int filter_trs_; int filter_k_; // // Assertions // // We map predicates into bits packed in this uint32_t container static_assert(ThreadMap::Iterations::kStrided * ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8, "Currently, the number of loads per iteration is limited by the size of the predicates container."); public: CUTLASS_HOST_DEVICE Conv3dDgradFilterTileAccessIteratorOptimized( Conv3dDgradFilterIteratorOptimizedParams const &params, Conv3dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), predicates_(0), filter_trs_(0), filter_k_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_k_ = threadblock_offset.row() + thread_coord.strided(); Index column = threadblock_offset.column() + thread_coord.contiguous(); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < 
ThreadMap::Iterations::kContiguous; ++c) { int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided; int filter_c = column + c * ThreadMap::Delta::kContiguous; uint32_t pred = ((filter_k < problem_size_.K && filter_c < problem_size_.C) ? 1u : 0); int pred_idx = c + s * ThreadMap::Iterations::kContiguous; predicates_ |= (pred << pred_idx); } } pointer_ += ( filter_k_ * params.layout.stride()[3] + column ) * sizeof_bits<Element>::value / 8; set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { LongIndex next = params_.inc_next_trs; // moves to the next tile ++filter_trs_; if (filter_trs_ == params_.TRS) { filter_trs_ = 0; next = params_.inc_next_k; filter_k_ += params_.filter_k_delta; } // Clear predicates if needed CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) { uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous); predicates_ = (predicates_ & (~kClearMask)); } } pointer_ += next; } /// Returns true if the current coordinate is within the filter tensor W CUTLASS_HOST_DEVICE bool valid() { LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous; return (predicates_ & (1u << pred_idx)); } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { return reinterpret_cast<AccessType const *>(pointer_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv3dDgradFilterTileAccessIteratorOptimized &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { // Move to the next K coordinate within the tile pointer_ += params_.inc_next_strided; return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(Conv3dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.C % (128/sizeof_bits<Element>::value)) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
size: 9,569 bytes · lang: C · avg_line_length: 32 · max_line_length: 123 · alphanum_fraction: 0.657122
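The iterator above guards every global load with one bit of the packed `predicates_` word: the constructor sets bit `c + s * kContiguous` when both the K and C coordinates are in range, `advance()` clears a whole strided group once `filter_k_` runs past `K`, and `valid()` tests a single bit. Below is a minimal host-side sketch of that bit-packing scheme; the tile extents and deltas are hypothetical stand-ins for the ThreadMap values, not taken from any concrete configuration.

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for ThreadMap::Iterations / ThreadMap::Delta in the iterator above.
constexpr int kStrided = 4;        // accesses along K
constexpr int kContiguous = 2;     // accesses along C
constexpr int kDeltaStrided = 16;  // element distance between strided accesses
constexpr int kDeltaContiguous = 8;

// Pack one predicate bit per access: bit index = c + s * kContiguous.
uint32_t build_predicates(int k0, int c0, int K, int C) {
  uint32_t predicates = 0;
  for (int s = 0; s < kStrided; ++s) {
    for (int c = 0; c < kContiguous; ++c) {
      int filter_k = k0 + s * kDeltaStrided;
      int filter_c = c0 + c * kDeltaContiguous;
      uint32_t pred = (filter_k < K && filter_c < C) ? 1u : 0u;
      predicates |= (pred << (c + s * kContiguous));
    }
  }
  return predicates;
}

// Mirror of advance(): clear every contiguous bit of a strided group whose K went out of range.
void clear_out_of_range_k(uint32_t &predicates, int k0, int K) {
  for (int s = 0; s < kStrided; ++s) {
    if (k0 + s * kDeltaStrided >= K) {
      uint32_t mask = ((1u << kContiguous) - 1) << (s * kContiguous);
      predicates &= ~mask;
    }
  }
}

int main() {
  uint32_t p = build_predicates(/*k0=*/48, /*c0=*/0, /*K=*/56, /*C=*/64);
  clear_out_of_range_k(p, /*k0=*/48, /*K=*/56);
  std::printf("predicates = 0x%x\n", static_cast<unsigned>(p));
  return 0;
}
```

The `static_assert` in the iterator enforces the same constraint the sketch relies on: the total number of accesses per tile must fit in the 32 bits of the predicate word.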
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/threadblock/implicit_gemm_multistage.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/cache_operation.h" #include "cutlass/gemm/threadblock/mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class ImplicitGemmMultistage : public gemm::threadblock::MmaBase<Shape_, Policy_, Stages> { public: ///< Base class using Base = gemm::threadblock::MmaBase<Shape_, Policy_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Policy describing tuning details using Policy = Policy_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // // Dependent types // /// Fragment of accumulator tile using ElementC = typename Policy::Operator::ElementC; using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Internal structure exposed for introspection. 
struct Detail { /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE ImplicitGemmMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } CUTLASS_DEVICE void copy_tiles_and_advance( IteratorA &iterator_A, IteratorB &iterator_B, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); ++iterator_A; } 
++this->smem_iterator_A_; } } iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B.get(), iterator_B.valid()); ++iterator_B; } ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< initial value of accumulator FragmentC const &src_accum, ///< number of iterations per channel int gemm_k_iterations_per_channel = 0, ///< Imaginary strides used for planar-complex only - ignored here int64_t imag_stride_A = 0, int64_t imag_stride_B = 0) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); ++iterator_A; } ++this->smem_iterator_A_; } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B.get(), iterator_B.valid()); ++iterator_B; } ++this->smem_iterator_B_; } // Move to the next stage iterator_A.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. 
cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance(iterator_A, iterator_B); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); // tf32x3 kernels use staging accumulation. warp_mma uses a temporary // accumulator and this temporary accumulator is added to the final // accumulator once in every mainloop iteration. plus<FragmentC> plus_accum; FragmentC tmp_accum; if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { tmp_accum.clear(); } // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % 2]); // Issue global->shared copies for the next stage int group_start_iteration_A, group_start_iteration_B; if (warp_mma_k + 1 == Base::kWarpGemmIterations) { group_start_iteration_A = 0; group_start_iteration_B = 0; } else { group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; } copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B); if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { warp_mma( tmp_accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], tmp_accum ); if (warp_mma_k == 0) { accum = plus_accum(accum, tmp_accum); tmp_accum.clear(); } } else { warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], accum ); } if (warp_mma_k + 1 == Base::kWarpGemmIterations) warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); if (warp_mma_k + 2 == Base::kWarpGemmIterations) { // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; } } } if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { accum = plus_accum(accum, tmp_accum); } // Insert fence and wait for all outstanding cp.async operations to commit. 
cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
size: 20,086 bytes · lang: C · avg_line_length: 35.992633 · max_line_length: 100 · alphanum_fraction: 0.609778
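The mainloop above pipelines global-to-shared copies through a circular buffer of `kStages` shared-memory tiles: `cp_async_fence()` closes each stage, `cp_async_wait<kStages - 2>()` blocks until at most `kStages - 2` committed stages are still in flight, and two indices (`smem_write_stage_idx`, `smem_read_stage_idx`) rotate through the buffer, rewinding the iterators with negative tile offsets on wrap-around. A small host-side sketch of just that index rotation, with a hypothetical stage count:

```cpp
#include <cstdio>

constexpr int kStages = 4;  // hypothetical stage count (Base::kStages in the kernel above)

int main() {
  // Writes start at the last prefetched stage and reads at stage 0, matching
  // "smem_write_stage_idx = Base::kStages - 1; smem_read_stage_idx = 0;".
  int write_stage = kStages - 1;
  int read_stage = 0;

  for (int iter = 0; iter < 8; ++iter) {
    std::printf("iter %d: write stage %d, read stage %d\n", iter, write_stage, read_stage);

    // Advance both indices; the wrap to 0 corresponds to adding the negative
    // tile offsets that return the shared-memory iterators to the start of the buffer.
    write_stage = (write_stage == kStages - 1) ? 0 : write_stage + 1;
    read_stage  = (read_stage  == kStages - 1) ? 0 : read_stage  + 1;
  }
  return 0;
}
```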
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/device/direct_convolution.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Template for device-level Depthwise Convolution */ #pragma once #include <limits> #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/conv/convolution.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template<typename DirectConvolutionKernel_> class DirectConvolution { public: using UnderlyingKernel = DirectConvolutionKernel_; using ElementA = typename UnderlyingKernel::ElementA; using LayoutA = typename UnderlyingKernel::LayoutA; using ElementB = typename UnderlyingKernel::ElementB; using LayoutB = typename UnderlyingKernel::LayoutB; using ElementC = typename UnderlyingKernel::ElementC; using LayoutC = typename UnderlyingKernel::LayoutC; using ElementAccumulator = typename UnderlyingKernel::ElementAccumulator; using ElementCompute = typename UnderlyingKernel::ElementCompute; using OperatorClass = typename UnderlyingKernel::OperatorClass; using ArchTag = typename UnderlyingKernel::ArchTag; using ThreadblockShape = typename UnderlyingKernel::ThreadblockShape; using WarpShape = typename UnderlyingKernel::WarpShape; using InstructionShape = typename UnderlyingKernel::InstructionShape; using ThreadblockSwizzle = typename UnderlyingKernel::ThreadblockSwizzle; using EpilogueOutputOp = typename UnderlyingKernel::EpilogueOutputOp; static int const kStages = UnderlyingKernel::kStages; static int const kConvDim = UnderlyingKernel::kConvDim; using WarpMmaOperator = typename UnderlyingKernel::WarpMmaOperator; using ArchMmaOperator = typename UnderlyingKernel::ArchMmaOperator; using 
MathOperator = typename UnderlyingKernel::MathOperator; static cutlass::conv::Operator const kConvolutionalOperator = UnderlyingKernel::kConvolutionalOperator; static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = UnderlyingKernel::kIteratorAlgorithm; static cutlass::conv::StrideSupport const kStrideSupport = UnderlyingKernel::kStrideSupport; static cutlass::conv::GroupMode const kGroupMode = UnderlyingKernel::kGroupMode; static int const kWarpCount = (ThreadblockShape::kM / WarpShape::kM) * (ThreadblockShape::kN / WarpShape::kN) * (ThreadblockShape::kK / WarpShape::kK); /// Argument structure using Arguments = typename UnderlyingKernel::Arguments; using ReorderKernel = typename UnderlyingKernel::ReorderKernel; private: /// Kernel parameters object typename UnderlyingKernel::Params params_; public: /// Constructs Implicit GEMM DirectConvolution() { } /// Determines whether the Implicit GEMM can execute the given problem. static Status can_implement(Arguments const &args) { // dispatch to iterators Status status = UnderlyingKernel::Mma::IteratorA::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } status = UnderlyingKernel::Mma::IteratorB::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } if (kGroupMode != conv::GroupMode::kDepthwise) { return Status::kErrorInvalidProblem; } // C and K should be multiple of groups if (args.problem_size.K != args.problem_size.groups && args.problem_size.C != args.problem_size.groups) { return Status::kErrorInvalidProblem; } static int const kAlignmentC = UnderlyingKernel::Epilogue::OutputTileIterator::kElementsPerAccess; if (kConvolutionalOperator == conv::Operator::kFprop) { if (args.problem_size.K % kAlignmentC) return Status::kErrorMisalignedOperand; } else if (kConvolutionalOperator == conv::Operator::kDgrad) { if (args.problem_size.C % kAlignmentC) return Status::kErrorMisalignedOperand; } else if (kConvolutionalOperator == conv::Operator::kWgrad) { if (args.problem_size.C % kAlignmentC) return Status::kErrorMisalignedOperand; } // Determine grid shape ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape( threadblock_swizzle.get_tiled_shape( kConvolutionalOperator, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices)); if (!(grid.y <= std::numeric_limits<uint16_t>::max() && grid.z <= std::numeric_limits<uint16_t>::max())) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { return 0; } /// Initializes GEMM state from arguments. Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // initialize the params structure from the arguments params_ = typename UnderlyingKernel::Params( args, static_cast<int *>(workspace) ); int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Initializes GEMM state from arguments. 
Status update(Arguments const &args, void *workspace = nullptr) { // update the params structure from the arguments params_.ptr_A = args.ref_A.data(); params_.ptr_B = args.ref_B.data(); params_.ptr_C = args.ref_C.data(); params_.ptr_D = args.ref_D.data(); params_.output_op = args.output_op; params_.ptr_reordered_B = args.ref_reordered_B.data();; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { // Launch reorder kernel if (params_.ptr_reordered_B != nullptr) { dim3 grid = ReorderKernel::get_grid_shape(params_); dim3 block = ReorderKernel::get_block_shape(); cutlass::Kernel<ReorderKernel><<<grid, block, 0, stream>>>(params_); } // Launch main kernel ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(32 * kWarpCount, 1, 1); // Dynamic SMEM size based on input params. int smem_size = int(params_.get_smem_size()); // Make sure we can use that much shared memory. cudaError_t status = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (status != cudaSuccess) return Status::kErrorInternal; cutlass::Kernel<UnderlyingKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } int get_smem_size() { return int(params_.get_smem_size()); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } } } /////////////////////////////////////////////////////////////////////////////////////////////////
size: 9,744 bytes · lang: C · avg_line_length: 35.092592 · max_line_length: 120 · alphanum_fraction: 0.67149
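`DirectConvolution` follows the usual CUTLASS device-level protocol: a static `can_implement()` check, a `get_workspace_size()` query, `initialize()` to build the kernel `Params` (and raise the dynamic shared-memory limit if needed), then `run()`. The sketch below shows that call sequence under stated assumptions: `DeviceConv` and `launch_conv` are hypothetical names, and constructing the `Arguments` object is left to the caller.

```cpp
#include <cuda_runtime.h>
#include "cutlass/cutlass.h"

// Drive a fully-specialized device-level convolution object through the
// can_implement -> get_workspace_size -> initialize -> run sequence.
template <typename DeviceConv>
cutlass::Status launch_conv(typename DeviceConv::Arguments const &args,
                            cudaStream_t stream = nullptr) {
  DeviceConv op;

  // Reject problem sizes the kernel cannot handle (group mode, alignment, grid limits).
  cutlass::Status status = DeviceConv::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // DirectConvolution reports zero workspace, but querying keeps the pattern uniform.
  size_t workspace_bytes = DeviceConv::get_workspace_size(args);
  void *workspace = nullptr;
  if (workspace_bytes) {
    cudaMalloc(&workspace, workspace_bytes);
  }

  status = op.initialize(args, workspace, stream);
  if (status == cutlass::Status::kSuccess) {
    status = op.run(stream);  // launches the filter-reorder kernel (if any) and the main kernel
  }

  if (workspace) {
    cudaFree(workspace);
  }
  return status;
}
```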
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/device/implicit_gemm_convolution.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Template for device-level Implicit GEMM Convolution */ #pragma once #include <limits> #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/conv/convolution.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template<typename ImplicitGemmKernel_> class ImplicitGemmConvolution { public: using UnderlyingKernel = ImplicitGemmKernel_; using ElementA = typename UnderlyingKernel::ElementA; using LayoutA = typename UnderlyingKernel::LayoutA; using ElementB = typename UnderlyingKernel::ElementB; using LayoutB = typename UnderlyingKernel::LayoutB; using ElementC = typename UnderlyingKernel::ElementC; using LayoutC = typename UnderlyingKernel::LayoutC; using ElementAccumulator = typename UnderlyingKernel::ElementAccumulator; using ElementCompute = typename UnderlyingKernel::ElementCompute; using OperatorClass = typename UnderlyingKernel::OperatorClass; using ArchTag = typename UnderlyingKernel::ArchTag; using ThreadblockShape = typename UnderlyingKernel::ThreadblockShape; using WarpShape = typename UnderlyingKernel::WarpShape; using InstructionShape = typename UnderlyingKernel::InstructionShape; using ThreadblockSwizzle = typename UnderlyingKernel::ThreadblockSwizzle; using EpilogueOutputOp = typename UnderlyingKernel::EpilogueOutputOp; static int const kStages = UnderlyingKernel::kStages; static int const kConvDim = UnderlyingKernel::kConvDim; using WarpMmaOperator = typename UnderlyingKernel::WarpMmaOperator; using ArchMmaOperator = typename UnderlyingKernel::ArchMmaOperator; using 
MathOperator = typename UnderlyingKernel::MathOperator; static cutlass::conv::Operator const kConvolutionalOperator = UnderlyingKernel::kConvolutionalOperator; static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = UnderlyingKernel::kIteratorAlgorithm; static cutlass::conv::StrideSupport const kStrideSupport = UnderlyingKernel::kStrideSupport; static cutlass::conv::GroupMode const kGroupMode = UnderlyingKernel::kGroupMode; static int const kWarpCount = (ThreadblockShape::kM / WarpShape::kM) * (ThreadblockShape::kN / WarpShape::kN) * (ThreadblockShape::kK / WarpShape::kK); /// Argument structure using Arguments = typename UnderlyingKernel::Arguments; private: /// Kernel parameters object typename UnderlyingKernel::Params params_; public: /// Constructs Implicit GEMM ImplicitGemmConvolution() { } /// Determines whether the Implicit GEMM can execute the given problem. static Status can_implement(Arguments const &args) { // dispatch to iterators Status status = UnderlyingKernel::Mma::IteratorA::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } status = UnderlyingKernel::Mma::IteratorB::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } // check group conv constraint if (args.problem_size.groups != 1) { if (kGroupMode == conv::GroupMode::kNone) { return Status::kErrorInvalidProblem; } // C and K should be multiple of groups if (args.problem_size.K % args.problem_size.groups || args.problem_size.C % args.problem_size.groups) { return Status::kErrorInvalidProblem; } // split-k is not supported if (args.problem_size.split_k_slices != 1) { return Status::kErrorInvalidProblem; } int k_per_group = args.problem_size.K / args.problem_size.groups; // k_per_group should be multiple of ThreadblockShape N, one CTA calculate one group if (kGroupMode == conv::GroupMode::kSingleGroup && k_per_group % ThreadblockShape::kN) { return Status::kErrorInvalidProblem; } // ThreadblockShape::kN should be divisible by k_per_group, one CTA calculate multiple groups if (kGroupMode == conv::GroupMode::kMultipleGroup && ThreadblockShape::kN % k_per_group) { return Status::kErrorInvalidProblem; } // current optimized iterator algo only supports SingleGroup mode if (kIteratorAlgorithm == IteratorAlgorithm::kOptimized && kGroupMode != conv::GroupMode::kSingleGroup) { return Status::kErrorInvalidProblem; } } static int const kAlignmentC = UnderlyingKernel::Epilogue::OutputTileIterator::kElementsPerAccess; if (kConvolutionalOperator == conv::Operator::kFprop) { if (args.problem_size.K % kAlignmentC) return Status::kErrorMisalignedOperand; } else if (kConvolutionalOperator == conv::Operator::kDgrad) { if (args.problem_size.C % kAlignmentC) return Status::kErrorMisalignedOperand; } else if (kConvolutionalOperator == conv::Operator::kWgrad) { if (args.problem_size.C % kAlignmentC) return Status::kErrorMisalignedOperand; } // check for unsupported problem sizes for strided dgrad implementation if (kConvolutionalOperator == conv::Operator::kDgrad && kStrideSupport == conv::StrideSupport::kStrided) { // Unity stride (1x1) is supported by strided dgrad but disabled for performance // reasons. For unity stride, use strided dgrad optimized unity stride specialization. // Note that unit tests strided dgrad for unity stride to make sure that strided // dgrad implemetnation is functionaly sound. 
// Strided dgrad implementation also support mixed strides, i.e., (1x2) and (2x1) if(args.problem_size.stride_h == 1 && args.problem_size.stride_w == 1) { return Status::kErrorNotSupported; } // split-k (serial or parallel) is not supported for strided dgrad if(args.problem_size.split_k_slices > 1) { return Status::kErrorNotSupported; } // dilation > {1x1} is not supported for strided dgrad if(args.problem_size.dilation_h > 1 || args.problem_size.dilation_w > 1) { return Status::kErrorNotSupported; } } // Determine grid shape ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape( threadblock_swizzle.get_tiled_shape( kConvolutionalOperator, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices)); if (!(grid.y <= std::numeric_limits<uint16_t>::max() && grid.z <= std::numeric_limits<uint16_t>::max())) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t workspace_bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape( kConvolutionalOperator, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); if(args.split_k_mode == SplitKMode::kParallel) { // Split-K parallel: CTAs in k-dimension write the partial results in a temporary workspace. // The user needs to call a reduction operator to optain the final output tensor workspace_bytes = sizeof(ElementAccumulator) * size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size)) * size_t(grid_tiled_shape.k()); } else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size.split_k_slices > 1) { // Split-K serial: The user workspace is used to store semaphore and serialize writing the // final reduced output to user's output tensor workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n()); } return workspace_bytes; } /// Initializes GEMM state from arguments. Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { if (args.problem_size.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream); if (status != cudaSuccess) { return Status::kErrorInternal; } } // initialize the params structure from the arguments params_ = typename UnderlyingKernel::Params( args, static_cast<int *>(workspace) ); int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Initializes GEMM state from arguments. Status update(Arguments const &args, void *workspace = nullptr) { // update the params structure from the arguments params_.ptr_A = args.ref_A.data(); params_.ptr_B = args.ref_B.data(); params_.ptr_C = args.ref_C.data(); params_.ptr_D = args.ref_D.data(); params_.output_op = args.output_op; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. 
Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(32 * kWarpCount, 1, 1); int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage)); cutlass::Kernel<UnderlyingKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } } } /////////////////////////////////////////////////////////////////////////////////////////////////
size: 12,619 bytes · lang: C · avg_line_length: 36.337278 · max_line_length: 106 · alphanum_fraction: 0.669387
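`get_workspace_size()` above distinguishes the two split-K modes: parallel split-K stores one full partial output tensor per K-slice in accumulator precision, while serial split-K only needs one semaphore integer per (m, n) output tile. A back-of-the-envelope sketch of those two formulas; every number below is hypothetical:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical problem and tiling figures, not taken from the header above.
  size_t tensor_c_elements = size_t(32) * 56 * 56 * 256;  // N * P * Q * K output elements
  size_t accumulator_bytes = 4;                           // e.g. float accumulator
  size_t grid_m = 98, grid_n = 2, grid_k = 4;             // tiled shape incl. split-k slices

  // Parallel split-K: each K-slice writes a full partial output in accumulator precision.
  size_t parallel_ws = accumulator_bytes * tensor_c_elements * grid_k;

  // Serial split-K: one int semaphore per (m, n) output tile to serialize the reduction.
  size_t serial_ws = sizeof(int) * grid_m * grid_n;

  std::printf("parallel split-k workspace: %zu bytes\n", parallel_ws);
  std::printf("serial   split-k workspace: %zu bytes\n", serial_ws);
  return 0;
}
```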
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/device/implicit_gemm_convolution_fusion.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Template for device-level fused activation's scale+bias+relu and Implicit GEMM Convolution */ #pragma once #include <limits> #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/conv/convolution.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template<typename ImplicitGemmFusionKernel_> class ImplicitGemmConvolutionFusion { public: using ImplicitGemmFusionKernel = ImplicitGemmFusionKernel_; using ElementA = typename ImplicitGemmFusionKernel::ElementA; using LayoutA = typename ImplicitGemmFusionKernel::LayoutA; using ElementB = typename ImplicitGemmFusionKernel::ElementB; using LayoutB = typename ImplicitGemmFusionKernel::LayoutB; // using ElementScaleBias = typename ImplicitGemmFusionKernel::ElementScaleBias; // using LayoutScaleBias = typename ImplicitGemmFusionKernel::LayoutScaleBias; using ElementC = typename ImplicitGemmFusionKernel::ElementC; using LayoutC = typename ImplicitGemmFusionKernel::LayoutC; using ElementAccumulator = typename ImplicitGemmFusionKernel::ElementAccumulator; using ElementCompute = typename ImplicitGemmFusionKernel::ElementCompute; using OperatorClass = typename ImplicitGemmFusionKernel::OperatorClass; using ArchTag = typename ImplicitGemmFusionKernel::ArchTag; using ThreadblockShape = typename ImplicitGemmFusionKernel::ThreadblockShape; using WarpShape = typename ImplicitGemmFusionKernel::WarpShape; using InstructionShape = typename ImplicitGemmFusionKernel::InstructionShape; using ThreadblockSwizzle = typename 
ImplicitGemmFusionKernel::ThreadblockSwizzle; using EpilogueOutputOp = typename ImplicitGemmFusionKernel::EpilogueOutputOp; static int const kStages = ImplicitGemmFusionKernel::kStages; static int const kConvDim = ImplicitGemmFusionKernel::kConvDim; using WarpMmaOperator = typename ImplicitGemmFusionKernel::WarpMmaOperator; using ArchMmaOperator = typename ImplicitGemmFusionKernel::ArchMmaOperator; using MathOperator = typename ImplicitGemmFusionKernel::MathOperator; static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemmFusionKernel::kConvolutionalOperator; static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = ImplicitGemmFusionKernel::kIteratorAlgorithm; static int const kWarpCount = (ThreadblockShape::kM / WarpShape::kM) * (ThreadblockShape::kN / WarpShape::kN) * (ThreadblockShape::kK / WarpShape::kK); /// Argument structure using Arguments = typename ImplicitGemmFusionKernel::Arguments; private: /// Kernel parameters object typename ImplicitGemmFusionKernel::Params params_; public: /// Constructs Implicit GEMM ImplicitGemmConvolutionFusion() { } /// Determines whether the Implicit GEMM can execute the given problem. static Status can_implement(Arguments const &args) { // dispatch to iterators Status status = ImplicitGemmFusionKernel::Mma::IteratorA::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } status = ImplicitGemmFusionKernel::Mma::IteratorB::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } // Determine grid shape ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape( threadblock_swizzle.get_tiled_shape( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size), {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices)); if (!(grid.y <= std::numeric_limits<uint16_t>::max() && grid.z <= std::numeric_limits<uint16_t>::max())) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t workspace_bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size), {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); if(args.split_k_mode == SplitKMode::kParallel) { // Split-K parallel: CTAs in k-dimension write the partial results in a temporary workspace. // The user needs to call a reduction operator to optain the final output tensor workspace_bytes = sizeof(ElementAccumulator) * size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size)) * size_t(grid_tiled_shape.k()); } else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size.split_k_slices > 1) { // Split-K serial: The user workspace is used to store semaphore and serialize writing the // final reduced output to user's output tensor workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n()); } return workspace_bytes; } /// Initializes GEMM state from arguments. 
Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { if (args.problem_size.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream); if (status != cudaSuccess) { return Status::kErrorInternal; } } // initialize the params structure from the arguments params_ = typename ImplicitGemmFusionKernel::Params( args, static_cast<int *>(workspace) ); int smem_size = int(sizeof(typename ImplicitGemmFusionKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<ImplicitGemmFusionKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Initializes Impicit GEMM state from arguments. Status update(Arguments const &args, void *workspace = nullptr) { // update the params structure from the arguments params_.ptr_A = args.ref_A.data(); params_.ptr_B = args.ref_B.data(); params_.ptr_scale = args.ref_A_scale.data(); params_.ptr_bias = args.ref_A_bias.data(); params_.ptr_C = args.ref_C.data(); params_.ptr_D = args.ref_D.data(); params_.output_op = args.output_op; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(32 * kWarpCount, 1, 1); int smem_size = int(sizeof(typename ImplicitGemmFusionKernel::SharedStorage)); cutlass::Kernel<ImplicitGemmFusionKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } } } /////////////////////////////////////////////////////////////////////////////////////////////////
size: 10,044 bytes · lang: C · avg_line_length: 36.342007 · max_line_length: 114 · alphanum_fraction: 0.681302
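All three device wrappers in this dump share the same opt-in for large dynamic shared memory: when `sizeof(SharedStorage)` reaches 48 KB, `initialize()` raises `cudaFuncAttributeMaxDynamicSharedMemorySize` on the kernel before launching it with that much dynamic shared memory. A standalone sketch of the pattern; `placeholder_kernel` and the 96 KB figure are assumptions for illustration, and the request is still bounded by the device's per-block shared-memory limit.

```cuda
#include <cstdio>
#include <cuda_runtime.h>

// Placeholder standing in for cutlass::Kernel<...>; it only exists so the
// attribute call below has a kernel to point at.
__global__ void placeholder_kernel(char *out) {
  extern __shared__ char shared_storage[];
  if (threadIdx.x == 0) {
    out[blockIdx.x] = shared_storage[0];
  }
}

int main() {
  // Hypothetical SharedStorage size, larger than the 48 KB default limit.
  int smem_size = 96 * 1024;

  if (smem_size >= (48 << 10)) {
    cudaError_t result = cudaFuncSetAttribute(
        placeholder_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
    if (result != cudaSuccess) {
      std::printf("shared-memory opt-in failed: %s\n", cudaGetErrorString(result));
      return 1;
    }
  }

  // A subsequent launch may now request smem_size bytes of dynamic shared memory:
  // placeholder_kernel<<<grid, block, smem_size, stream>>>(ptr);
  return 0;
}
```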
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_depthwise_fprop.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level Depthwise implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/kernel/direct_convolution.h" #include "cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/depthwise_fprop_pipelined.h" // Direct Conv Related Header files #include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_optimized.h" #include "cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h" #include "cutlass/conv/threadblock/depthwise_fprop_filter_tile_access_iterator_direct_conv_optimized.h" #include "cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for DepthwiseFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic, conv::StrideSupport StrideSupport = StrideSupport::kStrided, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = cutlass::sizeof_bits<ElementB>::value / cutlass::sizeof_bits<ElementB>::value > struct DefaultDepthwiseFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for DepthwiseFprop with direct convolution algorithm template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename ThreadBlockOutputShape, typename FilterShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic, conv::StrideSupport StrideSupport = StrideSupport::kStrided, // MatrixShape<Height, Width> typename StrideShape = cutlass::MatrixShape<-1, -1>, // MatrixShape< Height, Width> typename DilationShape = cutlass::MatrixShape<-1, -1>, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultDepthwiseDirect2dConvFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassSimt convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Depthwise specialization for Analytic IteratorAlgorithm template < typename ElementA, typename LayoutA, typename ElementB, typename 
LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultDepthwiseFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, // cutlass::arch::OpMultiplyAdd IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::conv::threadblock::DepthwiseMmaCoreWithLaneAccessSize< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 128, sizeof_bits<ElementB>::value, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB, cutlass::conv::GroupMode::kDepthwise > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::DepthwiseFpropPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, cutlass::conv::GroupMode::kDepthwise >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Depthwise specialization for direct 2d conv implementation, /// multiple stage pipeline, and SIMT-based mainloop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename ThreadBlockOutputShape, typename FilterShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, typename StrideShape, typename DilationShape, int AlignmentA, int AlignmentB > struct DefaultDepthwiseDirect2dConvFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, ThreadBlockOutputShape, FilterShape, 
WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, StrideShape, DilationShape, AlignmentA, AlignmentB > { // One warp handles all of the groups assigned to a cta. static_assert(ThreadblockShape::kN == WarpShape::kN, "ThreadblockShape::kN should be the same as WarpShape::kN"); static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount, "ThreadblockShape::kK and WarpShape::kK should be the same as the filter size"); static_assert(ThreadblockShape::kM % WarpShape::kM == 0, "ThreadblockShape::kM must be divisible by WarpShape::kM"); static_assert(ThreadBlockOutputShape::kN, "ThreadBlockOutputShape::kN should be 1"); // Define the core components from GEMM using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize< ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 128, 128, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta> ThreadBlockOutputShape, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; using ThreadOutputShape = typename MmaCore::ThreadOutputShape; static cutlass::arch::CacheOperation::Kind const CacheOpA = ((sizeof_bits<ElementA>::value * AlignmentA) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ?
cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt< ThreadblockShape, // < outputShape:KMNK, groups per cta> WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount, ThreadOutputShape, ThreadBlockOutputShape >::Epilogue; // Define the Mma using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage< ThreadblockShape, IteratorA, SmemIteratorA, CacheOpA, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages, Epilogue >; // Define the kernel using Kernel = cutlass::conv::kernel::DirectConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, cutlass::conv::GroupMode::kDepthwise, ThreadBlockOutputShape >; }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Depthwise specialization for direct 2d conv implementation with fixed stride and dilation, /// multiple stage pipeline, and SIMT-based mainloop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename ThreadBlockOutputShape, typename FilterShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, typename StrideShape, typename DilationShape, int AlignmentA, int AlignmentB > struct DefaultDepthwiseDirect2dConvFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kFixedStrideDilation, StrideSupport, StrideShape, DilationShape, AlignmentA, AlignmentB > { // One warp handles all of the groups assigned to a cta.
static_assert(ThreadblockShape::kN == WarpShape::kN, "ThreadblockShape::kN should be the same as WarpShape::kN"); static_assert(ThreadblockShape::kK == FilterShape::kCount && WarpShape::kK == FilterShape::kCount, "ThreadblockShape::kK and WarpShape::kK should be the same as the filter size"); static_assert(ThreadblockShape::kM % WarpShape::kM == 0, "ThreadblockShape::kM must be divisible by WarpShape::kM"); static_assert(ThreadBlockOutputShape::kN, "ThreadBlockOutputShape::kN should be 1"); static_assert(StrideShape::kRow >= 0 && StrideShape::kColumn >= 0, "Stride should be fixed"); static_assert(DilationShape::kRow >= 0 && DilationShape::kColumn >= 0, "Dilation should be fixed"); // Activations loaded by threadblock static int const ActivationShapeH = (ThreadBlockOutputShape::kH - 1) * StrideShape::kRow + (FilterShape::kRow - 1) * DilationShape::kRow + 1; static int const ActivationShapeW = (ThreadBlockOutputShape::kW - 1) * StrideShape::kColumn + (FilterShape::kColumn - 1) * DilationShape::kColumn + 1; using ActivationShape = cutlass::conv::TensorNHWCShape<1, ActivationShapeH, ActivationShapeW, ThreadblockShape::kN >; // Define the core components from GEMM using MmaCore = typename cutlass::conv::threadblock::DepthwiseDirectConvMmaCoreWithLaneAccessSize< ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 128, 128, Stages, MathOperatorTag, IteratorAlgorithm::kFixedStrideDilation, StrideShape, DilationShape, ActivationShape>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation< cutlass::MatrixShape<ThreadblockShape::kM,ThreadblockShape::kN>, // < outputShape:KMNK, groups per cta> ThreadBlockOutputShape, StrideShape, DilationShape, ActivationShape, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kN, FilterShape::kCount>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; using ThreadOutputShape = typename MmaCore::ThreadOutputShape; static cutlass::arch::CacheOperation::Kind const CacheOpA = ((sizeof_bits<ElementA>::value * AlignmentA) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ?
cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultDirectConvEpilogueSimt< ThreadblockShape, // < outputShape:KMNK, groups per cta> WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount, ThreadOutputShape, ThreadBlockOutputShape >::Epilogue; // Define the Mma using Mma = threadblock::DepthwiseFpropDirectConvMultipleStage< ThreadblockShape, IteratorA, SmemIteratorA, CacheOpA, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages, Epilogue, IteratorAlgorithm::kFixedStrideDilation >; // Define the kernel using Kernel = cutlass::conv::kernel::DirectConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, cutlass::conv::GroupMode::kDepthwise, ThreadBlockOutputShape >; }; } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
19,294
C
31.758913
119
0.697419
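For reference, the DefaultDepthwiseFprop helper defined in default_depthwise_fprop.h above is normally consumed by naming its nested ::Kernel type. The sketch below shows one plausible instantiation of the two-stage SIMT/Analytic specialization; the element types, tile shapes, epilogue, swizzle, and arch tag are assumptions chosen for illustration, not values mandated by the header.

// Hedged sketch: illustrative instantiation of DefaultDepthwiseFprop (SIMT, 2 stages).
#include "cutlass/conv/kernel/default_depthwise_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

using DepthwiseFpropKernel = typename cutlass::conv::kernel::DefaultDepthwiseFprop<
    cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementA / LayoutA (activations)
    cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementB / LayoutB (filter)
    cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementC / LayoutC (output)
    float,                                          // ElementAccumulator
    cutlass::arch::OpClassSimt,                     // depthwise mainloop is SIMT-based
    cutlass::arch::Sm60,
    cutlass::gemm::GemmShape<64, 64, 8>,            // ThreadblockShape
    cutlass::gemm::GemmShape<32, 32, 8>,            // WarpShape
    cutlass::gemm::GemmShape<1, 1, 1>,              // InstructionShape (SIMT)
    cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 1, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    2,                                              // Stages: matches the pipelined specialization
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;

// Wrapping the kernel in the device-level operator is the usual next step
// (shown here as an assumption of typical usage).
using DepthwiseFprop = cutlass::conv::device::ImplicitGemmConvolution<DepthwiseFpropKernel>;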
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv3d_fprop_fusion.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level fused activation's scale+bias+relu and implicit GEMM convolution definitions that combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h" #include "cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h" #include "cutlass/gemm/warp/scale_bias_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for fused batch norm and Conv3dFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided > struct DefaultConv3dFpropFusion; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialzation for Analytic IteratorAlgorithm and multistage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv3dFpropFusion < ElementA, LayoutA, ElementB, LayoutB, ElementScaleBias, LayoutScaleBias, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using IteratorScaleBias = cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; using SmemIteratorScaleBias = cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static int const kThreadCount = 32; // Warp-level iterators to load scale and bias vectors using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator< MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias, LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>, typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount, MmaCore::WarpCount::kK>; // Define the Mma using Mma = threadblock::ImplicitGemmFpropFusionMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, IteratorScaleBias, SmemIteratorScaleBias, arch::CacheOperation::Always, MmaPolicy, WarpIteratorScaleBias, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Optimized IteratorAlgorithm and /// multistage pipeline.
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv3dFpropFusion < ElementA, LayoutA, ElementB, LayoutB, ElementScaleBias, LayoutScaleBias, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag >; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using IteratorScaleBias = cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; using SmemIteratorScaleBias = cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static int const kThreadCount = 32; // Warp-level iterators to load scale and bias vectors using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator< MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias, LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>, typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount, MmaCore::WarpCount::kK>; // Define the Mma using Mma = threadblock::ImplicitGemmFpropFusionMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, IteratorScaleBias, SmemIteratorScaleBias, arch::CacheOperation::Always, MmaPolicy, WarpIteratorScaleBias, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass 
/////////////////////////////////////////////////////////////////////////////////////////////////
11,980
C
32.188366
100
0.700501
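As with the other Default* helpers, DefaultConv3dFpropFusion above is consumed through its nested ::Kernel type. A hedged sketch of one possible tensor-op instantiation follows; the fp16 NDHWC element types, RowMajor scale/bias layout, SM80 tile shapes, stage count, and epilogue below are assumptions for illustration only.

// Hedged sketch: illustrative instantiation of DefaultConv3dFpropFusion, which fuses
// a per-channel scale/bias (e.g. a folded batch norm) into the Conv3d fprop mainloop.
#include "cutlass/conv/kernel/default_conv3d_fprop_fusion.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

using Conv3dFpropFusionKernel = typename cutlass::conv::kernel::DefaultConv3dFpropFusion<
    cutlass::half_t, cutlass::layout::TensorNDHWC,  // ElementA / LayoutA (activations)
    cutlass::half_t, cutlass::layout::TensorNDHWC,  // ElementB / LayoutB (filter)
    cutlass::half_t, cutlass::layout::RowMajor,     // ElementScaleBias / LayoutScaleBias
    cutlass::half_t, cutlass::layout::TensorNDHWC,  // ElementC / LayoutC (output)
    float,                                          // ElementAccumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,         // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,           // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,            // InstructionShape
    cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                              // Stages: selects the multistage mainloop
    cutlass::arch::OpMultiplyAdd                    // IteratorAlgorithm defaults to kOptimized
>::Kernel;

The resulting type is the ImplicitGemmConvolutionFusion kernel named in the header, whose Arguments carry the extra scale and bias tensor references (ref_A_scale, ref_A_bias) seen in the device-level code earlier in this file set.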
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_with_reduction.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a GEMM with Reduction based on an existing UniversalGemm kernel. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/kernel/implicit_gemm_convolution_with_fused_epilogue.h" #include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h" #include "cutlass/epilogue/threadblock/epilogue_with_reduction.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename EpilogueReductionOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultConv2dFpropWithReduction { using ImplicitGemmBase = typename DefaultConv2dFprop< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm, StrideSupport, AlignmentA, AlignmentB >::Kernel; // Replace epilogue using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithReductionTensorOp< ArchTag, typename ImplicitGemmBase::Epilogue::Shape, typename ImplicitGemmBase::Epilogue::WarpMmaOperator, ImplicitGemmBase::Epilogue::kPartitionsK, ElementC, EpilogueOutputOp, EpilogueReductionOp, ImplicitGemmBase::Epilogue::kElementsPerAccess >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue< typename ImplicitGemmBase::Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
4,660
C
34.580152
100
0.665451
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_with_broadcast.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a GEMM with Reduction based on an existing UniversalGemm kernel. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/kernel/implicit_gemm_convolution_with_fused_epilogue.h" #include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h" #include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultConv2dFpropWithBroadcast { using ImplicitGemmBase = typename DefaultConv2dFprop< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm, StrideSupport, AlignmentA, AlignmentB >::Kernel; // Replace epilogue using Epilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogueWithBroadcastTensorOp< ArchTag, typename ImplicitGemmBase::Epilogue::Shape, typename ImplicitGemmBase::Epilogue::WarpMmaOperator, ImplicitGemmBase::Epilogue::kPartitionsK, ElementC, typename EpilogueOutputOp::ElementT, ElementC, EpilogueOutputOp, ImplicitGemmBase::Epilogue::kElementsPerAccess >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionWithFusedEpilogue< typename ImplicitGemmBase::Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
4,658
C
34.564885
100
0.664448
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d_wgrad_fusion.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_tile_iterator.h" #include "cutlass/conv/threadblock/predicated_scale_bias_vector_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided > struct DefaultConv2dWgradFusion; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv2dWgradFusion < ElementA, LayoutA, ElementB, LayoutB, ElementScaleBias, LayoutScaleBias, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using IteratorScaleBias = cutlass::conv::threadblock::PredicatedScaleBiasVectorIterator< cutlass::MatrixShape<1, WarpShape::kN>, ElementScaleBias, LayoutScaleBias>; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmWgradFusionMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, IteratorScaleBias, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv2dWgradFusion < ElementA, LayoutA, ElementB, LayoutB, ElementScaleBias, LayoutScaleBias, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using IteratorScaleBias = cutlass::conv::threadblock::PredicatedScaleBiasVectorIterator< cutlass::MatrixShape<1, WarpShape::kN>, ElementScaleBias, LayoutScaleBias>; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmWgradFusionMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, IteratorScaleBias, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
10,459
C
31.085889
100
0.685247
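DefaultConv2dWgradFusion above follows the same pattern, fusing the activation scale/bias vectors into the wgrad mainloop. Below is a hedged instantiation sketch; all concrete element types, layouts, tile shapes, and the stage count are chosen purely for illustration and are not prescribed by the header.

// Hedged sketch: illustrative tensor-op instantiation of DefaultConv2dWgradFusion.
#include "cutlass/conv/kernel/default_conv2d_wgrad_fusion.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

using Conv2dWgradFusionKernel = typename cutlass::conv::kernel::DefaultConv2dWgradFusion<
    cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementA / LayoutA (output gradient)
    cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementB / LayoutB (activations)
    cutlass::half_t, cutlass::layout::RowMajor,     // ElementScaleBias / LayoutScaleBias
    float, cutlass::layout::TensorNHWC,             // ElementC / LayoutC (filter gradient)
    float,                                          // ElementAccumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,         // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,           // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,            // InstructionShape
    cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    4,                                              // Stages: multistage wgrad fusion mainloop
    cutlass::arch::OpMultiplyAdd                    // IteratorAlgorithm defaults to kOptimized
>::Kernel;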
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv3d_fprop.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided > struct DefaultConv3dFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Analytic Iterator Algorithm /// and 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Optimized Iterator Algorithm /// and 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dFprop specialization for Optimized IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv3dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
14,864
C
27.808139
100
0.696179
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_with_fused_epilogue.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined Implicit GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/semaphore.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/epilogue/threadblock/output_iterator_parameter.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ = Conv2dProblemSize ///! 
Convolutional operator on 2D or 3D problem > struct ImplicitGemmConvolutionWithFusedEpilogue { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static Operator const kConvolutionalOperator = ConvOperator; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename EpilogueOutputOp::ElementOutput; /// Set output tensor C layout using LayoutC = LayoutA; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using WarpMmaOperator = typename Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename ArchMmaOperator::Operator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename WarpMmaOperator::Shape; using InstructionShape = typename ArchMmaOperator::Shape; static int const kStages = Mma::kStages; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using TensorRefA = typename Mma::IteratorA::TensorRef; using TensorRefB = typename Mma::IteratorB::TensorRef; using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>; /// Check iterator A and B convolution dimension are the same and // set device::ImplicitGemmConvolution::kConvDim static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim, "Convolution on different dimensions is not supported"); static int const kConvDim = Mma::IteratorA::kConvDim; /// Conv dimension and problem size structure (Conv2d or Conv3d) using ConvProblemSize = ConvProblemSize_; static conv::GroupMode const kGroupMode = conv::GroupMode::kNone; /// Wgrad C stride idx for implicit gemm algorithm // Conv2d row-major matrix C (KxRSC) // Conv3d row-major matrix C (KxTRSC) static int const kWgradCStrideIdx = platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3; /// This chooses the appropriate stride element of the C tensor. static int const kTensorCStrideIdx = (kConvolutionalOperator == conv::Operator::kWgrad ?
kWgradCStrideIdx : 0); // // // using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter< LayoutC, typename Epilogue::OutputTileIterator::Layout, TensorRefC, ConvOperator, ConvProblemSize >; /// Argument structure struct Arguments { // // Data members // ConvProblemSize problem_size; TensorRefA ref_A; TensorRefB ref_B; TensorRefC ref_C; TensorRefC ref_D; typename EpilogueOutputOp::Params output_op; SplitKMode split_k_mode; void * ptr_Vector; void * ptr_Tensor; typename LayoutC::Stride::Index ldr; typename LayoutC::Stride::Index ldt; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments() { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size, TensorRefA const & ref_A, TensorRefB const & ref_B, TensorRefC const & ref_C, TensorRefC const & ref_D, typename EpilogueOutputOp::Params const & output_op, SplitKMode const & split_k_mode = SplitKMode::kSerial, void * ptr_Vector = nullptr, void * ptr_Tensor = nullptr, typename LayoutC::Stride::Index ldr = 0, typename LayoutC::Stride::Index ldt = 0 ): problem_size(problem_size), ref_A(ref_A), ref_B(ref_B), ref_C(ref_C), ref_D(ref_D), output_op(output_op), split_k_mode(split_k_mode), ptr_Vector(ptr_Vector), ptr_Tensor(ptr_Tensor), ldr(ldr), ldt(ldt) { } }; /// Parameters structure struct Params { ConvProblemSize problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; gemm::GemmCoord implicit_gemm_problem_size; int swizzle_log_tile; int gemm_k_iterations; typename Mma::IteratorA::Params iterator_A; typename Mma::IteratorA::Element const *ptr_A; typename Mma::IteratorB::Params iterator_B; typename Mma::IteratorB::Element const *ptr_B; typename Epilogue::OutputTileIterator::Params iterator_C; typename Epilogue::OutputTileIterator::Element *ptr_C; typename Epilogue::OutputTileIterator::Params iterator_D; typename Epilogue::OutputTileIterator::Element *ptr_D; typename EpilogueOutputOp::Params output_op; int *semaphore; SplitKMode split_k_mode; typename Epilogue::TensorTileIterator::Params params_Tensor; void * ptr_Vector; typename LayoutC::Stride::Index ldr; void * ptr_Tensor; // // Methods // CUTLASS_HOST_DEVICE Params(): swizzle_log_tile(0), gemm_k_iterations(0), ptr_Vector(nullptr), ldr(0), ptr_Tensor(nullptr) { } /// CUTLASS_HOST_DEVICE Params( Arguments const &args, int *semaphore = nullptr ): problem_size(args.problem_size), implicit_gemm_problem_size(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)), iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())), ptr_A(args.ref_A.data()), iterator_B(args.problem_size, args.ref_B.layout()), ptr_B(args.ref_B.data()), iterator_C(ConvOutputIteratorParameter::layout(args.ref_C)), ptr_C(args.ref_C.data()), iterator_D(ConvOutputIteratorParameter::layout(args.ref_D)), ptr_D(args.ref_D.data()), output_op(args.output_op), semaphore(semaphore), split_k_mode(args.split_k_mode), params_Tensor(args.ldt), ptr_Vector(args.ptr_Vector), ldr(args.ldr), ptr_Tensor(args.ptr_Tensor) { gemm_k_iterations = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size); ThreadblockSwizzle threadblock_swizzle; grid_tiled_shape = threadblock_swizzle.get_tiled_shape( implicit_gemm_problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape); } }; /// Shared memory 
storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE ImplicitGemmConvolutionWithFusedEpilogue() { } /// Executes one ImplicitGEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() || params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) { return; } // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.iterator_A, params.problem_size, params.ptr_A, thread_idx, MatrixCoord( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.k() * Mma::Shape::kK ) ); typename Mma::IteratorB iterator_B( params.iterator_B, params.problem_size, params.ptr_B, thread_idx, MatrixCoord( threadblock_tile_idx.k() * Mma::Shape::kK, threadblock_tile_idx.n() * Mma::Shape::kN ) ); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // Construct the semaphore. int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m(); Semaphore semaphore(params.semaphore + block_idx, thread_idx); // Compute logical position within grid threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // If performing a reduction via split-K, fetch the initial synchronization if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k()); } MatrixCoord threadblock_offset( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.n() * Mma::Shape::kN ); // Tile iterator writing to destination tensor typename Epilogue::OutputTileIterator iterator_D( params.iterator_D, params.ptr_D, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Tile iterator reading from source accumulator tensor typename Epilogue::OutputTileIterator iterator_C( params.iterator_C, params.ptr_C, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor); // Define the reduction output pointer and move to the appropriate place typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector); // Additional tensor to load from typename Epilogue::TensorTileIterator tensor_iterator( params.params_Tensor, // Only the final block outputs Tensor ((params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) && (params.grid_tiled_shape.k() != threadblock_tile_idx.k() + 1)) ? nullptr : ptr_Tensor, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_offset.column() + threadblock_tile_idx.m() * params.ldr; } // Wait on the semaphore - this latency may have been covered by iterator construction if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_idx.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_idx.k()); } // Each split-k-slice writes to a unique tensor location else if (params.split_k_mode == SplitKMode::kParallel) { iterator_D.add_pointer_offset(threadblock_tile_idx.k() * cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size)); } // Execute the epilogue operator to update the destination tensor. epilogue(output_op, // Only the final block uses Vector ((params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) && (params.grid_tiled_shape.k() != threadblock_tile_idx.k() + 1)) ? nullptr : ptr_Vector, iterator_D, accumulators, iterator_C, tensor_iterator, ConvOutputIteratorParameter::extent(params.problem_size), threadblock_offset); // // Release the semaphore // if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_idx.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
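Editor's note: this kernel is normally launched by a device-level wrapper. The sketch below is assumed host code showing how the Params, SharedStorage, kThreadCount, and ThreadblockSwizzle members defined above translate into a launch configuration; the helper name launch_conv and its error handling are hypothetical.

#include <cuda_runtime.h>
#include "cutlass/device_kernel.h"   // cutlass::Kernel<>

// Hypothetical free function: ConvKernel is a fully specialized
// ImplicitGemmConvolutionWithFusedEpilogue whose Params were built from Arguments.
template <typename ConvKernel>
cudaError_t launch_conv(typename ConvKernel::Params const &params, cudaStream_t stream = nullptr) {
  typename ConvKernel::ThreadblockSwizzle swizzle;
  dim3 grid = swizzle.get_grid_shape(params.grid_tiled_shape);   // one CTA per output tile
  dim3 block(ConvKernel::kThreadCount, 1, 1);                    // kThreadCount == 32 * WarpCount::kCount
  int smem_size = int(sizeof(typename ConvKernel::SharedStorage));
  if (smem_size >= (48 << 10)) {
    // Tiles larger than the 48 KB static limit must opt in to dynamic shared memory.
    cudaFuncSetAttribute(cutlass::Kernel<ConvKernel>,
                         cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
  }
  cutlass::Kernel<ConvKernel><<<grid, block, smem_size, stream>>>(params);
  return cudaGetLastError();
}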
16,749
C
32.5
119
0.654188
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d_group_fprop.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::GroupMode GroupMode, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultConv2dGroupFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline that supports all GroupMode.
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::GroupMode GroupMode, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dGroupFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, GroupMode, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { static_assert(std::is_same<LayoutA, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(std::is_same<LayoutB, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(std::is_same<LayoutC, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA, GroupMode >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB, GroupMode >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, GroupMode >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop specialization for Optimized IteratorAlgorithm and multistage /// pipeline that supports GroupMode::kSingleGroup. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dGroupFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, GroupMode::kSingleGroup, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { static_assert(std::is_same<LayoutA, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(std::is_same<LayoutB, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(std::is_same<LayoutC, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, GroupMode::kSingleGroup >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dGroupFprop specialization for Optimized IteratorAlgorithm and /// 2 stage pipeline that supports GroupMode::kSingleGroup. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dGroupFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, GroupMode::kSingleGroup, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { static_assert(std::is_same<LayoutA, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(std::is_same<LayoutB, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); static_assert(std::is_same<LayoutC, cutlass::layout::TensorNHWC>::value, "Current group conv only support NHWC layout"); // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop, Conv2dProblemSize, GroupMode::kSingleGroup >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
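Editor's note: a hedged instantiation sketch for the kSingleGroup/kOptimized specialization above. The element types, NHWC layouts, Sm80 arch tag, tile shapes, epilogue, swizzle, and stage count are assumptions chosen only to make the template-argument order concrete; trailing StrideSupport and alignment parameters take their defaults.

#include "cutlass/conv/kernel/default_conv2d_group_fprop.h"   // this header
#include "cutlass/numeric_types.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

// Illustrative FP16 NHWC group convolution where each CTA tile maps to a single group.
using ExampleGroupFpropKernel = cutlass::conv::kernel::DefaultConv2dGroupFprop<
    cutlass::half_t, cutlass::layout::TensorNHWC,          // ElementA, LayoutA
    cutlass::half_t, cutlass::layout::TensorNHWC,          // ElementB, LayoutB
    float,           cutlass::layout::TensorNHWC,          // ElementC, LayoutC
    float,                                                  // ElementAccumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,                 // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,                   // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,                    // InstructionShape
    cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                                      // Stages
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::GroupMode::kSingleGroup,
    cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;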
15,891
C
31.366599
103
0.701277
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined Implicit GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/semaphore.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/epilogue/threadblock/output_iterator_parameter.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ = Conv2dProblemSize ///! 
Convolutional operator on 2D or 3D problem > struct ImplicitGemmConvolutionStridedDgrad { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static Operator const kConvolutionalOperator = ConvOperator; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename EpilogueOutputOp::ElementOutput; /// Set output tensor C layout using LayoutC = LayoutA; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using WarpMmaOperator = typename Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename ArchMmaOperator::Operator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename WarpMmaOperator::Shape; using InstructionShape = typename ArchMmaOperator::Shape; static int const kStages = Mma::kStages; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using TensorRefA = typename Mma::IteratorA::TensorRef; using TensorRefB = typename Mma::IteratorB::TensorRef; using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>; /// Check iterator A and B convolution dimension are the same and // set device::ImplicitGemmConvolution::kConvDim static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim, "Convolution on different dimensions is not supported"); static int const kConvDim = Mma::IteratorA::kConvDim; /// Conv dimension and problem size structure (Conv2d or Conv3d) using ConvProblemSize = ConvProblemSize_; static conv::GroupMode const kGroupMode = conv::GroupMode::kNone; /// Wgrad C stride idx for implicit gemm algorithm // Conv2d row-major matrix C (KxRSC) // Conv3d row-major matrix C (KxTRSC) static int const kWgradCStrideIdx = platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3; /// This chooses the appropriate stride element of the C tensor. static int const kTensorCStrideIdx = (kConvolutionalOperator == conv::Operator::kWgrad ?
kWgradCStrideIdx : 0); // Strided dgrad uses a specialized threadblock swizzle for functionality and performance static_assert((platform::is_same<ThreadblockSwizzle, threadblock::StridedDgradHorizontalThreadblockSwizzle>::value) || (platform::is_same<ThreadblockSwizzle, threadblock::StridedDgradIdentityThreadblockSwizzle<1>>::value) || (platform::is_same<ThreadblockSwizzle, threadblock::StridedDgradIdentityThreadblockSwizzle<4>>::value) || (platform::is_same<ThreadblockSwizzle, threadblock::StridedDgradIdentityThreadblockSwizzle<8>>::value), "Needs ThreadblockSwizzle type specialized for strided dgrad"); // // // using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter< LayoutC, typename Epilogue::OutputTileIterator::Layout, TensorRefC, ConvOperator, ConvProblemSize >; /// Argument structure struct Arguments { // // Data members // ConvProblemSize problem_size; TensorRefA ref_A; TensorRefB ref_B; TensorRefC ref_C; TensorRefC ref_D; typename EpilogueOutputOp::Params output_op; SplitKMode split_k_mode; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments() { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size, TensorRefA const & ref_A, TensorRefB const & ref_B, TensorRefC const & ref_C, TensorRefC const & ref_D, typename EpilogueOutputOp::Params const & output_op, SplitKMode const & split_k_mode = SplitKMode::kSerial ): problem_size(problem_size), ref_A(ref_A), ref_B(ref_B), ref_C(ref_C), ref_D(ref_D), output_op(output_op), split_k_mode(split_k_mode) { } }; /// Parameters structure struct Params { ConvProblemSize problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; FastDivmod stride_h_divmod; FastDivmod stride_w_divmod; int gemm_k_iterations; typename Mma::IteratorA::Params iterator_A; typename Mma::IteratorA::Element const *ptr_A; typename Mma::IteratorB::Params iterator_B; typename Mma::IteratorB::Element const *ptr_B; typename Epilogue::OutputTileIterator::Params iterator_C; typename Epilogue::OutputTileIterator::Element *ptr_C; typename Epilogue::OutputTileIterator::Params iterator_D; typename Epilogue::OutputTileIterator::Element *ptr_D; typename EpilogueOutputOp::Params output_op; int *semaphore; SplitKMode split_k_mode; // // Methods // CUTLASS_HOST_DEVICE Params(): gemm_k_iterations(0) { } /// CUTLASS_HOST_DEVICE Params( Arguments const &args, int *semaphore = nullptr ): problem_size(args.problem_size), stride_h_divmod(args.problem_size.stride_h), stride_w_divmod(args.problem_size.stride_w), iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())), ptr_A(args.ref_A.data()), iterator_B(args.problem_size, args.ref_B.layout()), ptr_B(args.ref_B.data()), iterator_C(ConvOutputIteratorParameter::layout(args.ref_C), args.problem_size, ThreadblockShape::kM), ptr_C(args.ref_C.data()), iterator_D(ConvOutputIteratorParameter::layout(args.ref_D), args.problem_size, ThreadblockShape::kM), ptr_D(args.ref_D.data()), output_op(args.output_op), semaphore(semaphore), split_k_mode(args.split_k_mode) { gemm_k_iterations = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size); ThreadblockSwizzle threadblock_swizzle; grid_tiled_shape = threadblock_swizzle.get_tiled_shape( kConvolutionalOperator, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); } }; /// Shared memory storage structure union SharedStorage { typename 
Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE ImplicitGemmConvolutionStridedDgrad() { } /// Executes one ImplicitGEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() || params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) { return; } // Compute position within threadblock int thread_idx = threadIdx.x; // Compute starting filter position for strided dgrad int tile_m_per_filter = strided_dgrad_tile_m_per_filter(params.problem_size, ThreadblockShape::kM); int filter_tile_m = (threadblock_tile_idx.m() / tile_m_per_filter); // The subsequent fast_divmod() operations are equivalent to the following logical computation: // // int start_r = filter_tile_m / (params.problem_size.stride_w); // int start_s = filter_tile_m % (params.problem_size.stride_w); int start_r, start_s; params.stride_w_divmod(start_r, start_s, filter_tile_m); int filter_r = start_r; int filter_s = start_s; if (params.problem_size.mode == Mode::kConvolution) { filter_r = (params.problem_size.R - 1 - filter_r); filter_s = (params.problem_size.S - 1 - filter_s); } // Starting h, w positions for filter position in gemm_k=0 int start_h, start_w; strided_dgrad_starting_coords( params.problem_size, params.stride_h_divmod, params.stride_w_divmod, filter_r, filter_s, start_h, start_w); if (start_h >= params.problem_size.H || start_w >= params.problem_size.W) { return; } typename Mma::FragmentC accumulators; accumulators.clear(); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; // Check if CTA contributes valid MMA (Dy * w) and accumulator will be non-zero after MMA if (start_r < params.problem_size.R && start_s < params.problem_size.S) { // Scale gemm_k_iterations for strided dgrad int gemm_k_iterations = (params.gemm_k_iterations / (params.problem_size.R * params.problem_size.S) ) * params.problem_size.num_gemm_k_filter_positions(start_r, start_s); // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.iterator_A, params.problem_size, params.ptr_A, thread_idx, params.stride_h_divmod, params.stride_w_divmod, start_r, start_s, MatrixCoord( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.k() * Mma::Shape::kK ) ); typename Mma::IteratorB iterator_B( params.iterator_B, params.problem_size, params.ptr_B, thread_idx, start_r, start_s, MatrixCoord( threadblock_tile_idx.k() * Mma::Shape::kK, threadblock_tile_idx.n() * Mma::Shape::kN ) ); // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); // Compute threadblock-scoped matrix multiply-add mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); } // // Epilogue // EpilogueOutputOp output_op(params.output_op); // Construct the semaphore. 
int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m(); Semaphore semaphore(params.semaphore + block_idx, thread_idx); // Compute logical position within grid threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape); // If performing a reduction via split-K, fetch the initial synchronization if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k()); } MatrixCoord threadblock_offset( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.n() * Mma::Shape::kN ); // Tile iterator writing to destination tensor typename Epilogue::OutputTileIterator iterator_D( params.iterator_D, params.ptr_D, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, params.stride_h_divmod, params.stride_w_divmod, start_r, start_s, threadblock_offset ); // Tile iterator reading from source accumulator tensor typename Epilogue::OutputTileIterator iterator_C( params.iterator_C, params.ptr_C, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, params.stride_h_divmod, params.stride_w_divmod, start_r, start_s, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_idx.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_idx.k()); } // Each split-k-slice writes to a unique tensor location else if (params.split_k_mode == SplitKMode::kParallel) { iterator_D.add_pointer_offset(threadblock_tile_idx.k() * cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size)); } // Run efficient epilogue epilogue(output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_idx.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
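Editor's note: the start_r/start_s decomposition in the strided-dgrad mainloop above is ordinary integer division and remainder by stride_w; FastDivmod only accelerates that computation. A small standalone example of the mapping, under an assumed 2x2 stride:

#include <cstdio>

int main() {
  int stride_h = 2, stride_w = 2;                    // assumed convolution strides
  // Each band of output-tile rows handles one (start_r, start_s) filter-start class;
  // there are stride_h * stride_w such classes.
  for (int filter_tile_m = 0; filter_tile_m < stride_h * stride_w; ++filter_tile_m) {
    int start_r = filter_tile_m / stride_w;          // row phase of the filter start
    int start_s = filter_tile_m % stride_w;          // column phase of the filter start
    std::printf("filter_tile_m=%d -> start_r=%d start_s=%d\n",
                filter_tile_m, start_r, start_s);    // prints (0,0), (0,1), (1,0), (1,1)
  }
  return 0;
}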
17,222
C
33.935091
118
0.653583
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_fusion.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level fused activation's scale+bias+relu and implicit GEMM convolution definitions that combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h" #include "cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h" #include "cutlass/gemm/warp/scale_bias_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for fused batch norm and Conv2dFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided > struct DefaultConv2dFpropFusion; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv2dFpropFusion < ElementA, LayoutA, ElementB, LayoutB, ElementScaleBias, LayoutScaleBias, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using IteratorScaleBias = cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; using SmemIteratorScaleBias = cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static int const kThreadCount = 32; // Warp-level iterators to load scale and bias vectors using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator< MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias, LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>, typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount, MmaCore::WarpCount::kK>; // Define the Mma using Mma = threadblock::ImplicitGemmFpropFusionMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, IteratorScaleBias, SmemIteratorScaleBias, arch::CacheOperation::Always, MmaPolicy, WarpIteratorScaleBias, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and /// multistage pipeline.
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementScaleBias, typename LayoutScaleBias, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv2dFpropFusion < ElementA, LayoutA, ElementB, LayoutB, ElementScaleBias, LayoutScaleBias, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag >; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; /// Define iterators over tiles from scale/bias vectors using IteratorScaleBias = cutlass::conv::threadblock::PredicatedScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; using SmemIteratorScaleBias = cutlass::transform::threadblock::RegularScaleBiasVectorAccessIterator< cutlass::MatrixShape<1, ThreadblockShape::kK>, ElementScaleBias, LayoutScaleBias>; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static int const kThreadCount = 32; // Warp-level iterators to load scale and bias vectors using WarpIteratorScaleBias = cutlass::gemm::warp::ScaleBiasTileIterator< MatrixShape<WarpShape::kM, WarpShape::kK>, ElementScaleBias, LayoutScaleBias, MatrixShape<InstructionShape::kM, InstructionShape::kK>, typename WarpMmaTensorOp::IteratorA::Base::Policy, kThreadCount, MmaCore::WarpCount::kK>; // Define the Mma using Mma = threadblock::ImplicitGemmFpropFusionMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, IteratorScaleBias, SmemIteratorScaleBias, arch::CacheOperation::Always, MmaPolicy, WarpIteratorScaleBias, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionFusion< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass 
/////////////////////////////////////////////////////////////////////////////////////////////////
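Editor's note: relative to the plain fprop defaults, DefaultConv2dFpropFusion adds the element and layout of the per-channel scale/bias vectors consumed by the fused mainloop. The sketch below is an assumed instantiation (half_t NHWC operands, RowMajor scale/bias vectors, Sm80, illustrative tile shapes, a ReLU epilogue); it only illustrates argument order, not a configuration taken from this file.

#include "cutlass/conv/kernel/default_conv2d_fprop_fusion.h"   // this header
#include "cutlass/numeric_types.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

// Illustrative fused scale+bias+relu fprop kernel type.
using ExampleFusedFpropKernel = cutlass::conv::kernel::DefaultConv2dFpropFusion<
    cutlass::half_t, cutlass::layout::TensorNHWC,          // activations
    cutlass::half_t, cutlass::layout::TensorNHWC,          // filters
    cutlass::half_t, cutlass::layout::RowMajor,            // scale/bias vectors (assumed layout)
    float,           cutlass::layout::TensorNHWC,          // output
    float,                                                  // accumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,                 // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,                   // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,                    // InstructionShape
    cutlass::epilogue::thread::LinearCombinationRelu<float, 4, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                                      // Stages
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;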
11,953
C
32.391061
100
0.700577
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions for threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/threadblock/default_mma.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/conv/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h" #include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/threadblock/conv2d_tile_iterator.h" #include "cutlass/conv/threadblock/implicit_gemm_pipelined.h" #include "cutlass/conv/threadblock/implicit_gemm_multistage.h" #include "cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h" #include "cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h" #include "cutlass/conv/kernel/implicit_gemm_convolution.h" #include "cutlass/conv/kernel/implicit_gemm_convolution_fusion.h" #include "cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template < typename ArchTag, typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename OutputOp > struct DefaultConvEpilogue { using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, OutputOp::kCount >::Epilogue; }; template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename OutputOp > struct DefaultConvEpilogue< arch::Sm70, Shape, WarpMmaTensorOp, PartitionsK, OutputOp > { using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, OutputOp::kCount >::Epilogue; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess > struct DefaultConvEpilogueWithBroadcastTensorOp { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp< Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, ElementTensor, ElementVector, OutputOp, ElementsPerAccess >::Epilogue; }; template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess > struct DefaultConvEpilogueWithBroadcastTensorOp< arch::Sm70, Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, ElementTensor, ElementVector, OutputOp, ElementsPerAccess > { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastVoltaTensorOp< Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, ElementTensor, ElementVector, OutputOp, ElementsPerAccess >::Epilogue; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename OutputOp, typename ReductionOp, int ElementsPerAccess > struct DefaultConvEpilogueWithReductionTensorOp { using Epilogue = typename 
epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, OutputOp, ReductionOp, ElementsPerAccess >::Epilogue; }; template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename OutputOp, typename ReductionOp, int ElementsPerAccess > struct DefaultConvEpilogueWithReductionTensorOp< arch::Sm70, Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, OutputOp, ReductionOp, ElementsPerAccess > { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionVoltaTensorOp< Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, OutputOp, ReductionOp, ElementsPerAccess >::Epilogue; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Defaults for strided Dgrad template < typename ArchTag, typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename OutputOp > struct DefaultConvEpilogueStridedDgrad { using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, OutputOp::kCount >::Epilogue; }; template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename OutputOp > struct DefaultConvEpilogueStridedDgrad< arch::Sm70, Shape, WarpMmaTensorOp, PartitionsK, OutputOp > { using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOpStridedDgrad< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, OutputOp::kCount >::Epilogue; }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
7,671
C
27.102564
100
0.675662
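The detail helpers in default_conv2d.h dispatch on ArchTag: arch::Sm70 is routed to the Volta tensor-op epilogues, every other architecture to the generic tensor-op epilogues. A minimal sketch of how a kernel-level default would consult detail::DefaultConvEpilogue follows; the MmaCore parameters and the output functor are assumptions chosen only to make the dispatch concrete.

// Sketch only: derive a warp-level tensor op from an assumed MmaCore, then ask
// detail::DefaultConvEpilogue for the matching epilogue. Shapes and types are assumptions.
using AssumedMmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
    cutlass::gemm::GemmShape<128, 128, 32>,         // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,           // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,            // InstructionShape
    cutlass::half_t, cutlass::layout::RowMajor,     // A
    cutlass::half_t, cutlass::layout::ColumnMajor,  // B
    float, cutlass::layout::RowMajor,               // Accumulator
    cutlass::arch::OpClassTensorOp,
    3,                                              // Stages
    cutlass::arch::OpMultiplyAdd>;

using AssumedEpilogue = typename cutlass::conv::kernel::detail::DefaultConvEpilogue<
    cutlass::arch::Sm80,                            // non-Sm70 -> DefaultEpilogueTensorOp path
    cutlass::gemm::GemmShape<128, 128, 32>,
    typename AssumedMmaCore::MmaTensorOp,
    1,                                              // PartitionsK
    cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>
>::Epilogue;

Swapping cutlass::arch::Sm70 in as the first argument would pick the DefaultEpilogueVoltaTensorOp partial specialization instead, with no change to the caller.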
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d_wgrad.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultConv2dWgrad; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm and two // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm and two // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassSimt convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm, /// multi-stage pipeline, and FFMA-based mainloop for SM80 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AccessTypeA, int AccessTypeB > struct DefaultConv2dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, 
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AccessTypeA, AccessTypeB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm, /// multi-stage pipeline, and FFMA-based mainloop for SM80 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AccessTypeA, int AccessTypeB > struct DefaultConv2dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AccessTypeA, AccessTypeB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename 
MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Analytic IteratorAlgorithm, /// 2 stage pipeline, and FFMA-based mainloop for SM50 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AccessTypeA, int AccessTypeB > struct DefaultConv2dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AccessTypeA, AccessTypeB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, 
ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad specialization for Optimized IteratorAlgorithm, /// 2 stage pipeline, and FFMA-based mainloop for SM50 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AccessTypeA, int AccessTypeB > struct DefaultConv2dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AccessTypeA, AccessTypeB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dWgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dWgradActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
28,745
C
27.405138
100
0.700644
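As a reading aid for the record above, the sketch below instantiates DefaultConv2dWgrad along its Optimized/multistage tensor-op path. Every concrete type is an assumed choice (FP16 NHWC operand tensors, FP32 accumulation and output, SM80 shapes, three stages); the header itself imposes none of them.

// Assumed instantiation of the optimized multistage wgrad default (illustrative only).
using AssumedConv2dWgradKernel = typename cutlass::conv::kernel::DefaultConv2dWgrad<
    cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementA / LayoutA (output gradient)
    cutlass::half_t, cutlass::layout::TensorNHWC,   // ElementB / LayoutB (activation)
    float,           cutlass::layout::TensorNHWC,   // ElementC / LayoutC (filter gradient)
    float,                                          // ElementAccumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,         // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,           // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,            // InstructionShape
    cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                              // Stages -> multistage specialization
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::IteratorAlgorithm::kOptimized    // selects the optimized tile iterators
>::Kernel;

The remaining defaulted parameters (StrideSupport, AlignmentA, AlignmentB) keep their declared defaults: kStrided and 128-bit accesses per operand.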
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv3d_wgrad.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_optimized.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dWgrad template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided > struct DefaultConv3dWgrad; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dWgrad specialization for Analytic IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv3dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dWgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dWgradActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dWgrad specialization for Analytic IteratorAlgorithm and two // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag > struct DefaultConv3dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dWgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dWgradActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dWgrad specialization for Optimized IteratorAlgorithm and multistage // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv3dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dWgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dWgradActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dWgrad specialization for Optimized IteratorAlgorithm and two // pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag > struct DefaultConv3dWgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dWgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv3dWgradActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kWgrad, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
14,883
C
28.184314
100
0.698649
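The 3-D wgrad defaults in the record above mirror the 2-D versions but drop the alignment parameters and pass Conv3dProblemSize through to ImplicitGemmConvolution. A hedged instantiation sketch, with all concrete types assumed (FP16 NDHWC operands, FP32 accumulation, SM80 shapes, three stages):

// Assumed instantiation of DefaultConv3dWgrad on the optimized, multistage path (illustrative).
using AssumedConv3dWgradKernel = typename cutlass::conv::kernel::DefaultConv3dWgrad<
    cutlass::half_t, cutlass::layout::TensorNDHWC,  // ElementA / LayoutA (output gradient)
    cutlass::half_t, cutlass::layout::TensorNDHWC,  // ElementB / LayoutB (activation)
    float,           cutlass::layout::TensorNDHWC,  // ElementC / LayoutC (filter gradient)
    float,                                          // ElementAccumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,         // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,           // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,            // InstructionShape
    cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                              // Stages
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;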
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d_dgrad.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dDgrad template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultConv2dDgrad; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dDgrad specialization for Analytic IteratorAlgorithm Dgrad Strided and // multistage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport::kStrided, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kStrided, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kStrided, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; /// Defines a kernel for Conv2dDgrad specialization for Analytic IteratorAlgorithm Dgrad Strided // and 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport::kStrided, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIteratorStridedDgrad< cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kStrided, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIteratorStridedDgrad< cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kStrided, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogueStridedDgrad< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dDgrad specialization for Analytic IteratorAlgorithm Dgrad Unity Strided // and multistage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport::kUnity, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kUnity, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kUnity, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; /// Defines a kernel for Conv2dDgrad specialization for Analytic IteratorAlgorithm Dgrad Unity // 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport::kUnity, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kUnity, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kUnity, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dDgrad specialization for optimized IteratorAlgorithm Dgrad Unity Strided // and multistage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport::kUnity, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kUnity, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kUnity, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; /// Defines a kernel for Conv2dDgrad specialization for Optimized IteratorAlgorithm Dgrad Strided and // multistage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport::kStrided, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kStrided, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kStrided, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; /// Defines a kernel for Conv2dDgrad specialization for Optimized IteratorAlgorithm Dgrad Strided // and 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport::kStrided, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIteratorStridedDgrad< cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kStrided, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIteratorStridedDgrad< cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kStrided, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogueStridedDgrad< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; /// Defines a kernel for Conv2dDgrad specialization for Optimized IteratorAlgorithm Dgrad Unity // 2 stage pipeline template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport::kUnity, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = 
typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kUnity, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kUnity, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassSimt convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dDgrad specialization for Analytic IteratorAlgorithm, /// multi-stage pipeline, and FFMA-based mainloop for SM80 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, conv::StrideSupport::kUnity, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, 
ThreadMapA, conv::StrideSupport::kUnity >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, conv::StrideSupport::kUnity >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, conv::StrideSupport::kStrided, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, conv::StrideSupport::kStrided >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, conv::StrideSupport::kStrided >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the 
kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dDgrad specialization for Optimized IteratorAlgorithm, /// multi-stage pipeline, and FFMA-based mainloop for SM80 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport::kUnity, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kUnity >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kUnity >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, 
conv::StrideSupport::kStrided, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, conv::StrideSupport::kStrided >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, conv::StrideSupport::kStrided >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dDgrad specialization for Analytic IteratorAlgorithm, /// 2 stage pipeline, and FFMA-based mainloop for SM50 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, conv::StrideSupport::kUnity, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, conv::StrideSupport::kUnity > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; 
using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, conv::StrideSupport::kUnity > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, conv::StrideSupport::kStrided, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIteratorStridedDgrad< cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, conv::StrideSupport::kStrided > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIteratorStridedDgrad< cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, conv::StrideSupport::kStrided > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; 
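// ------------------------------------------------------------------------------------------------
// Summary of the DefaultConv2dDgrad dispatch above and below (editorial note added for navigation;
// it restates only what the specializations in this header already encode):
//
//  * A free `Stages` parameter selects the threadblock::ImplicitGemmMultistage mainloop; the
//    explicit 2-stage specializations select threadblock::ImplicitGemmPipelined and wrap the
//    access iterators in TileIterator (unit stride) or TileIteratorStridedDgrad (strided).
//  * StrideSupport::kUnity composes the ImplicitGemmConvolution kernel with the standard
//    DefaultEpilogueTensorOp / DefaultEpilogueSimt (or detail::DefaultConvEpilogue) epilogues;
//    StrideSupport::kStrided composes ImplicitGemmConvolutionStridedDgrad with the matching
//    *StridedDgrad epilogue variants.
//  * IteratorAlgorithm::kAnalytic vs. kOptimized picks the corresponding
//    Conv2dDgradOutputGradientTileAccessIterator* and Conv2dDgradFilterTileAccessIterator* types.
// ------------------------------------------------------------------------------------------------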
///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dDgrad specialization for Optimized IteratorAlgorithm, /// 2 stage pipeline, and FFMA-based mainloop for SM50 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport::kUnity, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kUnity > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, StrideSupport::kUnity > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, int AlignmentA, int AlignmentB > struct DefaultConv2dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, conv::StrideSupport::kStrided, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< 
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIteratorStridedDgrad< cutlass::conv::threadblock::Conv2dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, conv::StrideSupport::kStrided > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIteratorStridedDgrad< cutlass::conv::threadblock::Conv2dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB, conv::StrideSupport::kStrided > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimtStridedDgrad< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolutionStridedDgrad< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad >; }; } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
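// Illustrative usage sketch (editorial addition, not part of the upstream header): one plausible
// way to instantiate DefaultConv2dDgrad for an SM80 tensor-op, unit-stride dgrad. The element
// types, tile shapes, epilogue, swizzle, and the include path used for this header are assumptions
// chosen for the example, not values mandated by the header itself.

#include "cutlass/numeric_types.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/conv/kernel/default_conv2d_dgrad.h"   // assumed path of this header

using ElementA   = cutlass::half_t;  // output gradient (dy)
using ElementB   = cutlass::half_t;  // filter (w)
using ElementC   = cutlass::half_t;  // input gradient (dx)
using ElementAcc = float;            // accumulator

using DgradKernel = cutlass::conv::kernel::DefaultConv2dDgrad<
    ElementA, cutlass::layout::TensorNHWC,
    ElementB, cutlass::layout::TensorNHWC,
    ElementC, cutlass::layout::TensorNHWC,
    ElementAcc,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,              // threadblock tile
    cutlass::gemm::GemmShape<64, 64, 32>,                // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,                 // tensor core instruction shape
    cutlass::epilogue::thread::LinearCombination<
        ElementC, 8, ElementAcc, ElementAcc>,            // D = alpha * acc + beta * C
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                                   // Stages != 2 -> multistage mainloop
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::IteratorAlgorithm::kOptimized,
    cutlass::conv::StrideSupport::kUnity,                // unit stride -> ImplicitGemmConvolution
    8, 8                                                 // 128-bit aligned accesses of half_t
>::Kernel;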
53,546
C
26.77334
103
0.713854
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/direct_convolution.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multi-staged Depthwise Convolution kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/semaphore.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/epilogue/threadblock/output_iterator_parameter.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure template <typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename Arguments_, ///! Kernel Arguments typename ConvOutputIteratorParameter_, ///! Output Iterator Params typename ConvProblemSize_ = Conv2dProblemSize, ///! Convolutional operator on 2D or 3D problem conv::GroupMode GroupMode_ = conv::GroupMode::kNone, ///! Group mode typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > ///! 
OutputShape per ThreadBlock struct DirectConvolutionParams { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; static Operator const kConvolutionalOperator = ConvOperator; using ConvProblemSize = ConvProblemSize_; using Arguments = Arguments_; using ConvOutputIteratorParameter = ConvOutputIteratorParameter_; using ThreadblockShape = typename Mma::Shape; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; static conv::GroupMode const kGroupMode = GroupMode_; static int const kStages = Mma::kStages; ConvProblemSize problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; gemm::GemmCoord implicit_gemm_problem_size; int swizzle_log_tile; int smem_size_; int gemm_k_iterations; int gemm_k_iterations_per_channel; typename Mma::IteratorA::Params iterator_A; typename Mma::IteratorA::Element const *ptr_A; typename Mma::IteratorB::Params iterator_B; typename Mma::IteratorB::Element const *ptr_B; typename Mma::IteratorB::Element *ptr_reordered_B; typename Epilogue::OutputTileIterator::Params iterator_C; typename Epilogue::OutputTileIterator::Element *ptr_C; typename Epilogue::OutputTileIterator::Params iterator_D; typename Epilogue::OutputTileIterator::Element *ptr_D; typename EpilogueOutputOp::Params output_op; int *semaphore; SplitKMode split_k_mode; int split_k_slices; // // Methods // CUTLASS_HOST_DEVICE DirectConvolutionParams() : swizzle_log_tile(0), gemm_k_iterations(0) {} /// CUTLASS_HOST_DEVICE DirectConvolutionParams(Arguments const &args, int *semaphore = nullptr) : problem_size(args.problem_size), implicit_gemm_problem_size( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)), iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())), ptr_A(args.ref_A.data()), iterator_B(Mma::IteratorB::getParams(args.problem_size, args.ref_B.layout())), ptr_B(args.ref_B.data()), ptr_reordered_B(args.ref_reordered_B.data()), iterator_C(ConvOutputIteratorParameter::layout(args.ref_C), args.problem_size), ptr_C(args.ref_C.data()), iterator_D(ConvOutputIteratorParameter::layout(args.ref_D), args.problem_size), ptr_D(args.ref_D.data()), output_op(args.output_op), semaphore(semaphore), split_k_mode(args.split_k_mode), split_k_slices(args.problem_size.split_k_slices) { gemm_k_iterations = depthwise_gemm_k_iterations<ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size, kIteratorAlgorithm, kGroupMode, ThreadblockShape::kN); gemm_k_iterations_per_channel = implicit_gemm_k_iterations_per_channel( kConvolutionalOperator, ThreadblockShape::kK, args.problem_size, kIteratorAlgorithm); ThreadblockSwizzle threadblock_swizzle; grid_tiled_shape = threadblock_swizzle.get_tiled_shape( kConvolutionalOperator, problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape); // Dynamic SMEM usage because stride and dilation are runtime params. 
smem_size_ = (iterator_A.activation_size * kStages + iterator_B.filter_size); } CUTLASS_HOST_DEVICE int get_smem_size() { // Dynamic Smem Size return smem_size_; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Params_, typename ElementB_> struct ReorderKernel { using Params = Params_; using ElementB = ElementB_; union SharedStorage {}; static unsigned int const kReorderKernelThreadPerCTA = 128; CUTLASS_HOST_DEVICE ReorderKernel() {} CUTLASS_HOST_DEVICE static dim3 get_grid_shape(Params const &params) { return dim3{static_cast<unsigned int>( (params.problem_size.filter_size() + kReorderKernelThreadPerCTA - 1) / kReorderKernelThreadPerCTA), 1, 1}; } CUTLASS_HOST_DEVICE static dim3 get_block_shape() { return dim3{kReorderKernelThreadPerCTA, 1, 1}; } CUTLASS_HOST_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { int64_t m = static_cast<int64_t>(params.problem_size.groups); int64_t n = static_cast<int64_t>(params.problem_size.filter_size() / params.problem_size.K); const ElementB *src_with_type = static_cast<const ElementB *>(params.ptr_B); ElementB *dst_with_type = static_cast<ElementB *>(params.ptr_reordered_B); int64_t linear_index = blockIdx.x * kReorderKernelThreadPerCTA + threadIdx.x; int64_t index_m = linear_index / n; int64_t index_n = linear_index % n; int64_t new_linear_index = index_m + index_n * m; if (linear_index < m * n) { dst_with_type[new_linear_index] = src_with_type[linear_index]; } return; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ = Conv2dProblemSize, ///! Convolutional operator on 2D or 3D problem conv::GroupMode GroupMode_ = conv::GroupMode::kNone, ///! 
Group mode typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > struct DirectConvolution { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; static Operator const kConvolutionalOperator = ConvOperator; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename EpilogueOutputOp::ElementOutput; /// Set output tensor C layout using LayoutC = LayoutA; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using WarpMmaOperator = typename Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename ArchMmaOperator::Operator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename WarpMmaOperator::Shape; using InstructionShape = typename cutlass::gemm::GemmShape<1, 1, 1>; static int const kStages = Mma::kStages; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using TensorRefA = typename Mma::IteratorA::TensorRef; using TensorRefB = typename Mma::IteratorB::TensorRef; using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>; /// Check iterator A and B convolution dimension are the same and // set device::ImplicitGemmConvolution::kConvDim static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim, "Convolution on different different dimensions is not supported"); static int const kConvDim = Mma::IteratorA::kConvDim; /// Conv dimension and problem size structure (Conv2d or Conv3d) using ConvProblemSize = ConvProblemSize_; static conv::GroupMode const kGroupMode = GroupMode_; // // // using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter< LayoutC, typename Epilogue::OutputTileIterator::Layout, TensorRefC, ConvOperator, ConvProblemSize >; /// Argument structure struct Arguments { // // Data members // ConvProblemSize problem_size; TensorRefA ref_A; TensorRefB ref_B; TensorRefB ref_reordered_B; TensorRefC ref_C; TensorRefC ref_D; typename EpilogueOutputOp::Params output_op; SplitKMode split_k_mode; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments() { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size, TensorRefA const & ref_A, TensorRefB const & ref_B, TensorRefC const & ref_C, TensorRefC const & ref_D, typename EpilogueOutputOp::Params const & output_op, TensorRefB const & ref_reordered_B = nullptr, SplitKMode const & split_k_mode = SplitKMode::kSerial ): problem_size(problem_size), ref_A(ref_A), ref_B(ref_B), ref_C(ref_C), ref_D(ref_D), output_op(output_op), ref_reordered_B(ref_reordered_B), split_k_mode(split_k_mode) { } }; using Params = typename cutlass::conv::kernel::DirectConvolutionParams<Mma, Epilogue, ThreadblockSwizzle, kConvolutionalOperator, Arguments, 
ConvOutputIteratorParameter, ConvProblemSize, kGroupMode, ThreadBlockOutputShape>; using ReorderKernel = typename cutlass::conv::kernel::ReorderKernel<Params, ElementB>; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE DirectConvolution() { } /// Executes one ImplicitGEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if threadblock is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() || params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) { return; } // Compute position within threadblock int thread_idx = threadIdx.x; int iterator_column_offset = 0; int filter_row_offset = 0; if (kGroupMode != GroupMode::kNone) { if (kGroupMode == GroupMode::kDepthwise) { iterator_column_offset += threadblock_tile_idx.n() * Mma::Shape::kN; } } // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.iterator_A, params.problem_size, params.ptr_A, thread_idx, MatrixCoord( threadblock_tile_idx.m() + threadblock_tile_idx.k(), iterator_column_offset ) ); typename Mma::IteratorB iterator_B( params.iterator_B, params.problem_size, params.ptr_reordered_B, thread_idx, MatrixCoord( filter_row_offset, iterator_column_offset ) ); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // Compute logical position within grid threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); MatrixCoord threadblock_offset( threadblock_tile_idx.m() + threadblock_tile_idx.k(), threadblock_tile_idx.n() * Mma::Shape::kN ); // Tile iterator writing to destination tensor typename Epilogue::OutputTileIterator iterator_D( params.iterator_D, params.ptr_D, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Tile iterator reading from source accumulator tensor typename Epilogue::OutputTileIterator iterator_C( params.iterator_C, params.ptr_C, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Compute threadblock-scoped matrix multiply-add // Epilogue is fused in the mainloop mma(params.gemm_k_iterations, accumulators, iterator_A, params.iterator_A, iterator_B, params.iterator_B, accumulators, epilogue, output_op, iterator_D, iterator_C, params.split_k_slices); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
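// Illustration (editorial addition): ReorderKernel above treats the filter tensor as an
// m x n row-major matrix with m = problem_size.groups and n = filter_size() / K, and stores it
// column-major, i.e. dst[index_m + index_n * m] = src[index_m * n + index_n]. The host-side
// sketch below reproduces the same permutation on plain std::vector data purely to make the index
// arithmetic concrete; it is not part of this header's API and the function name is hypothetical.

#include <cstdint>
#include <vector>

template <typename ElementB>
std::vector<ElementB> reorder_filter_host(std::vector<ElementB> const &src,
                                          int64_t groups, int64_t filter_size, int64_t K) {
  int64_t m = groups;            // same m as in ReorderKernel::operator()
  int64_t n = filter_size / K;   // same n as in ReorderKernel::operator()
  std::vector<ElementB> dst(static_cast<size_t>(m * n));
  // Assumes src holds at least m * n elements.
  for (int64_t linear_index = 0; linear_index < m * n; ++linear_index) {
    int64_t index_m = linear_index / n;
    int64_t index_n = linear_index % n;
    // Row-major source index -> column-major destination index.
    dst[static_cast<size_t>(index_m + index_n * m)] = src[static_cast<size_t>(linear_index)];
  }
  return dst;
}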
18,048
C
34.66996
123
0.633588
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided, /// Access granularity of A matrix in units of elements int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value, /// Access granularity of B matrix in units of elements int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value > struct DefaultConv2dFprop; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassTensorOp convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kFixedChannels, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFixedChannels< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFixedChannels< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and two stage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kFixedChannels, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFixedChannels< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFixedChannels< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kFewChannels, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFewChannels< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFewChannels< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kFewChannels, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFewChannels< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFewChannels< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage /// pipeline with interleaved layout. 
template < typename ElementA, typename ElementB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB, int InterleavedK > struct DefaultConv2dFprop < ElementA, layout::TensorNCxHWx<InterleavedK>, ElementB, layout::TensorCxRSKx<InterleavedK>, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, Stages, MathOperatorTag, true>; // Define iterators over tiles from the A operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. using ThreadMapA = typename MmaCore::SmemThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, layout::TensorNCxHWx<InterleavedK>, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. using ThreadMapB = typename MmaCore::SmemThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, InterleavedK >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm /// and 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and 2 stage /// pipeline with interleaved layout. 
template < typename ElementA, typename ElementB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB, int InterleavedK > struct DefaultConv2dFprop < ElementA, layout::TensorNCxHWx<InterleavedK>, ElementB, layout::TensorCxRSKx<InterleavedK>, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, 2, MathOperatorTag, true>; // Define iterators over tiles from the A operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. using ThreadMapA = typename MmaCore::SmemThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, layout::TensorNCxHWx<InterleavedK>, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand // Note GEMM shared memory threadmap is used here because conv global memory // layout needs to be mapped to fprop which is similar to the crosswise // layout which is used by the interleaved GEMM shared memory threadmap. // The Interleaved GEMM global memory layout is similar to the congruous // layout. using ThreadMapB = typename MmaCore::SmemThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, InterleavedK >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimzed IteratorAlgorithm and /// multistage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, Stages, MathOperatorTag >; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; static cutlass::arch::CacheOperation::Kind const CacheOpB = ((sizeof_bits<ElementB>::value * AlignmentB) == 128) ? cutlass::arch::CacheOperation::Global : cutlass::arch::CacheOperation::Always; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, CacheOpB, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimzed IteratorAlgorithm and // multistage pipeline with interleaved layout. 
template < typename ElementA, typename ElementB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB, int InterleavedK > struct DefaultConv2dFprop < ElementA, layout::TensorNCxHWx<InterleavedK>, ElementB, layout::TensorCxRSKx<InterleavedK>, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, Stages, MathOperatorTag, true >; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::SmemThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, layout::TensorNCxHWx<InterleavedK>, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::SmemThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, MmaPolicy, Stages >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, InterleavedK >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm /// and 2 stage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA, AccessTypeA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB, AccessTypeB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename detail::DefaultConvEpilogue< ArchTag, ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and 2 stage /// pipeline with interleaved layout. 
template < typename ElementA, typename ElementB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB, int InterleavedK > struct DefaultConv2dFprop < ElementA, layout::TensorNCxHWx<InterleavedK>, ElementB, layout::TensorCxRSKx<InterleavedK>, ElementC, LayoutC, ElementAccumulator, arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>, ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp, 2, MathOperatorTag, true>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::SmemThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, layout::TensorNCxHWx<InterleavedK>, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::SmemThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, layout::TensorCxRSKx<InterleavedK>, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue< ThreadblockShape, WarpMmaTensorOp, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, InterleavedK >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // OpClassSimt convolutions ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm, /// multi-stage pipeline, and FFMA-based mainloop for SM80 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, 
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm, /// multi-stage pipeline, and FFMA-based mainloop for SM80 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B 
operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Always, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm, /// 2 stage pipeline, and FFMA-based mainloop for SM50 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = 
cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm, /// 2 stage pipeline, and FFMA-based mainloop for SM50 template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, typename MathOperatorTag, conv::StrideSupport StrideSupport, int AlignmentA, int AlignmentB > struct DefaultConv2dFprop < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, 2, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport, AlignmentA, AlignmentB > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt, 2, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, LayoutA, ThreadMapA > >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::TileIterator< cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, LayoutB, ThreadMapB > >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmPipelined< ThreadblockShape, IteratorA, SmemIteratorA, IteratorB, SmemIteratorB, ElementC, LayoutC, MmaPolicy >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt< ThreadblockShape, WarpMmaSimtOp, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kFprop >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
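The DefaultConv2dFprop specializations above are normally consumed through their ::Kernel alias rather than assembled piecewise. A minimal sketch of one such instantiation follows; the element types, NHWC layouts, SM80 target, tile shapes, epilogue functor, swizzle, stage count, and iterator algorithm are illustrative assumptions, not values required by this header.

// Hedged usage sketch: every concrete choice below (FP16 NHWC tensors, SM80,
// 128x128x32 threadblock tile, 3 stages, optimized iterator) is an assumption
// picked for illustration only.
#include "cutlass/numeric_types.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

using ElementA     = cutlass::half_t;   // activations
using ElementB     = cutlass::half_t;   // filters
using ElementC     = cutlass::half_t;   // output
using ElementAccum = float;

using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
    ElementA, cutlass::layout::TensorNHWC,
    ElementB, cutlass::layout::TensorNHWC,
    ElementC, cutlass::layout::TensorNHWC,
    ElementAccum,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,    // ThreadblockShape
    cutlass::gemm::GemmShape<64, 64, 32>,      // WarpShape
    cutlass::gemm::GemmShape<16, 8, 16>,       // InstructionShape
    cutlass::epilogue::thread::LinearCombination<
        ElementC, 128 / cutlass::sizeof_bits<ElementC>::value,
        ElementAccum, ElementAccum>,           // EpilogueOutputOp
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                         // Stages
    cutlass::arch::OpMultiplyAdd,              // MathOperatorTag
    cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;

The remaining template arguments (StrideSupport and the two alignment values) are left at their declared defaults, i.e. kStrided and 128-bit accesses.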
56,838
C
27.562312
108
0.7122
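Two conventions recur in every specialization above and are easy to miss in the flattened source: the default AlignmentA/AlignmentB are chosen so that one access spans 128 bits, and CacheOpB selects the global-level (L1-bypassing) cache operation only when a single B access is exactly 128 bits wide. The standalone sketch below restates both rules in plain C++; the names are illustrative and are not CUTLASS API.

// Standalone illustration (not CUTLASS code) of the alignment default and the
// CacheOpB selection rule used by the DefaultConv2dFprop specializations.
#include <cstdio>

// Default AlignmentA / AlignmentB: number of elements in one 128-bit access.
constexpr int default_alignment(int element_bits) {
  return 128 / element_bits;
}

enum class CacheOp { Always, Global };

// Use the global-level cache operation only when one access is exactly
// 128 bits; otherwise fall back to caching at all levels.
constexpr CacheOp cache_op_b(int element_bits, int alignment_b) {
  return (element_bits * alignment_b == 128) ? CacheOp::Global : CacheOp::Always;
}

int main() {
  // half (16-bit) with the default alignment of 8: 8 * 16 = 128 -> Global.
  std::printf("half  : alignment=%d, cache_op=%s\n", default_alignment(16),
              cache_op_b(16, default_alignment(16)) == CacheOp::Global ? "Global" : "Always");
  // int8 with a reduced alignment of 4: 4 * 8 = 32 -> Always.
  std::printf("int8/4: cache_op=%s\n",
              cache_op_b(8, 4) == CacheOp::Global ? "Global" : "Always");
  return 0;
}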
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined Implicit GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/semaphore.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/epilogue/threadblock/output_iterator_parameter.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ = Conv2dProblemSize, ///! Convolutional operator on 2D or 3D problem conv::GroupMode GroupMode_ = conv::GroupMode::kNone ///! 
Group mode > struct ImplicitGemmConvolution { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static Operator const kConvolutionalOperator = ConvOperator; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename EpilogueOutputOp::ElementOutput; /// Set output tensor C layout using LayoutC = LayoutA; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using WarpMmaOperator = typename Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename ArchMmaOperator::Operator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename WarpMmaOperator::Shape; using InstructionShape = typename ArchMmaOperator::Shape; static int const kStages = Mma::kStages; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using TensorRefA = typename Mma::IteratorA::TensorRef; using TensorRefB = typename Mma::IteratorB::TensorRef; using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>; /// Check iterator A and B convolution dimension are the same and // set device::ImplicitGemmConvolution::kConvDim static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim, "Convolution on different different dimensions is not supported"); static int const kConvDim = Mma::IteratorA::kConvDim; /// Conv dimension and problem size structure (Conv2d or Conv3d) using ConvProblemSize = ConvProblemSize_; static conv::GroupMode const kGroupMode = GroupMode_; /// Wgrad C stride idx for implicit gemm algorithm // Conv2d row-major matrix C (KxRSC) // Conv3d row-major matrix C (KxTRSC) static int const kWgradCStrideIdx = platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3; /// This chooses the appropriate stride element of the C tensor. static int const kTensorCStrideIdx = (kConvolutionalOperator == conv::Operator::kWgrad ? 
kWgradCStrideIdx : 0); // // // using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter< LayoutC, typename Epilogue::OutputTileIterator::Layout, TensorRefC, ConvOperator, ConvProblemSize >; /// Argument structure struct Arguments { // // Data members // ConvProblemSize problem_size; TensorRefA ref_A; TensorRefB ref_B; TensorRefC ref_C; TensorRefC ref_D; typename EpilogueOutputOp::Params output_op; SplitKMode split_k_mode; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments() { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size, TensorRefA const & ref_A, TensorRefB const & ref_B, TensorRefC const & ref_C, TensorRefC const & ref_D, typename EpilogueOutputOp::Params const & output_op, SplitKMode const & split_k_mode = SplitKMode::kSerial ): problem_size(problem_size), ref_A(ref_A), ref_B(ref_B), ref_C(ref_C), ref_D(ref_D), output_op(output_op), split_k_mode(split_k_mode) { } }; /// Parameters structure struct Params { ConvProblemSize problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; gemm::GemmCoord implicit_gemm_problem_size; int swizzle_log_tile; int gemm_k_iterations; int gemm_k_iterations_per_channel; typename Mma::IteratorA::Params iterator_A; typename Mma::IteratorA::Element const *ptr_A; typename Mma::IteratorB::Params iterator_B; typename Mma::IteratorB::Element const *ptr_B; typename Epilogue::OutputTileIterator::Params iterator_C; typename Epilogue::OutputTileIterator::Element *ptr_C; typename Epilogue::OutputTileIterator::Params iterator_D; typename Epilogue::OutputTileIterator::Element *ptr_D; typename EpilogueOutputOp::Params output_op; int *semaphore; SplitKMode split_k_mode; // // Methods // CUTLASS_HOST_DEVICE Params(): swizzle_log_tile(0), gemm_k_iterations(0) { } /// CUTLASS_HOST_DEVICE Params( Arguments const &args, int *semaphore = nullptr ): problem_size(args.problem_size), implicit_gemm_problem_size(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)), iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())), ptr_A(args.ref_A.data()), iterator_B(args.problem_size, args.ref_B.layout()), ptr_B(args.ref_B.data()), iterator_C(ConvOutputIteratorParameter::layout(args.ref_C)), ptr_C(args.ref_C.data()), iterator_D(ConvOutputIteratorParameter::layout(args.ref_D)), ptr_D(args.ref_D.data()), output_op(args.output_op), semaphore(semaphore), split_k_mode(args.split_k_mode) { gemm_k_iterations = implicit_gemm_k_iterations( kConvolutionalOperator, ThreadblockShape::kK, args.problem_size, kIteratorAlgorithm, kGroupMode, ThreadblockShape::kN); gemm_k_iterations_per_channel = implicit_gemm_k_iterations_per_channel( kConvolutionalOperator, ThreadblockShape::kK, args.problem_size, kIteratorAlgorithm); ThreadblockSwizzle threadblock_swizzle; grid_tiled_shape = threadblock_swizzle.get_tiled_shape( implicit_gemm_problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE ImplicitGemmConvolution() { } /// Executes one ImplicitGEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle 
threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() || params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) { return; } // Compute position within threadblock int thread_idx = threadIdx.x; int iterator_A_column_offset = threadblock_tile_idx.k() * Mma::Shape::kK; if (kGroupMode != GroupMode::kNone) { if (kGroupMode != GroupMode::kDepthwise) { int k_per_group = params.problem_size.K / params.problem_size.groups; int group_idx = threadblock_tile_idx.n() * Mma::Shape::kN / k_per_group; int channels_per_group = params.problem_size.C / params.problem_size.groups; iterator_A_column_offset += group_idx * channels_per_group; } else { iterator_A_column_offset += threadblock_tile_idx.n() * Mma::Shape::kN; } } // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.iterator_A, params.problem_size, params.ptr_A, thread_idx, MatrixCoord( threadblock_tile_idx.m() * Mma::Shape::kM, iterator_A_column_offset ) ); typename Mma::IteratorB iterator_B( params.iterator_B, params.problem_size, params.ptr_B, thread_idx, MatrixCoord( threadblock_tile_idx.k() * Mma::Shape::kK, threadblock_tile_idx.n() * Mma::Shape::kN ) ); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, params.gemm_k_iterations_per_channel); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // Construct the semaphore. int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m(); Semaphore semaphore(params.semaphore + block_idx, thread_idx); // Compute logical position within grid threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // If performing a reduction via split-K, fetch the initial synchronization if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k()); } MatrixCoord threadblock_offset( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.n() * Mma::Shape::kN ); // Tile iterator writing to destination tensor typename Epilogue::OutputTileIterator iterator_D( params.iterator_D, params.ptr_D, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Tile iterator reading from source accumulator tensor typename Epilogue::OutputTileIterator iterator_C( params.iterator_C, params.ptr_C, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_idx.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_idx.k()); } // Each split-k-slice writes to a unique tensor location else if (params.split_k_mode == SplitKMode::kParallel) { iterator_D.add_pointer_offset(threadblock_tile_idx.k() * cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size)); } // Run efficient epilogue epilogue(output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_idx.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
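The serial split-K path at the end of operator() is easier to follow in isolation: partition k waits for the per-tile semaphore to reach k, uses the D tensor written by the previous partition as its source (partition 0 reads the real C), and then releases either k + 1 or 0 if it is the last partition. The host-side simulation below mirrors only that ordering; it uses no CUTLASS types and all names are illustrative.

// Host-side simulation (not CUTLASS code) of the serial split-K ordering in
// ImplicitGemmConvolution::operator() for a single output tile.
#include <cstdio>
#include <vector>

int main() {
  const int split_k_slices = 4;  // grid_tiled_shape.k() in the kernel
  int semaphore = 0;             // per-output-tile lock word

  double D = 0.0;                                       // one element of tensor D
  std::vector<double> partial = {1.0, 2.0, 3.0, 4.0};   // per-slice partial sums
  double beta_times_C = 0.5;                            // source term read by partition 0

  for (int k = 0; k < split_k_slices; ++k) {
    // semaphore.wait(k): partition k proceeds only when it is its turn.
    while (semaphore != k) { /* a real CTA would spin here */ }

    // Partition 0 reads the real source C; later partitions read D instead.
    double source = (k == 0) ? beta_times_C : D;
    D = source + partial[k];

    // semaphore.release(): the last partition resets the lock for the next
    // grid; every other partition hands the tile to partition k + 1.
    semaphore = (k + 1 == split_k_slices) ? 0 : k + 1;
  }

  std::printf("reduced value = %f, semaphore = %d\n", D, semaphore);
  return 0;
}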
15,454
C
32.818381
124
0.659635
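The grouped-convolution branch in the kernel above offsets iterator_A's starting column by the owning group's channel window. A minimal standalone C++ sketch of that arithmetic, using hypothetical tile-shape constants and illustrative problem sizes rather than the actual Mma::Shape types:

#include <cassert>

// Hypothetical stand-ins for Mma::Shape::kN and Mma::Shape::kK.
struct TileShape { static int const kN = 128; static int const kK = 32; };

// Mirrors the non-depthwise group-mode branch: each group owns K/groups output
// channels and C/groups input channels, so the tile's N index selects a group
// and the A-operand column window starts at that group's input-channel offset.
int iterator_a_column_offset(int tile_n_idx, int tile_k_idx, int K, int C, int groups) {
  int column = tile_k_idx * TileShape::kK;            // base offset along gemm-K
  int k_per_group = K / groups;                       // output channels per group
  int group_idx = tile_n_idx * TileShape::kN / k_per_group;
  int channels_per_group = C / groups;                // input channels per group
  return column + group_idx * channels_per_group;
}

int main() {
  // Example: K = 256, C = 128, groups = 2. A tile covering the second half of the
  // output channels reads activations starting at input channel 64.
  assert(iterator_a_column_offset(/*tile_n_idx=*/1, /*tile_k_idx=*/0, 256, 128, 2) == 64);
  return 0;
}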
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_fusion.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined fused activation's scale+bias+relu and Implicit GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/semaphore.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/epilogue/threadblock/output_iterator_parameter.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ = Conv2dProblemSize ///! 
Convolutional operator on 2D or 3D problem > struct ImplicitGemmConvolutionFusion { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static Operator const kConvolutionalOperator = ConvOperator; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementScaleBias = typename Mma::IteratorScaleBias::Element; using LayoutScaleBias = typename Mma::IteratorScaleBias::Layout; using ElementC = typename EpilogueOutputOp::ElementOutput; using LayoutC = LayoutA; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using WarpMmaOperator = typename Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename ArchMmaOperator::Operator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename WarpMmaOperator::Shape; using InstructionShape = typename ArchMmaOperator::Shape; static int const kStages = Mma::kStages; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using TensorRefA = typename Mma::IteratorA::TensorRef; using TensorRefB = typename Mma::IteratorB::TensorRef; using TensorRefScaleBias = typename Mma::IteratorScaleBias::TensorRef; using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>; /// Check that iterator A and B convolution dimensions are the same and // set device::ImplicitGemmConvolution::kConvDim static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim, "Convolution on different dimensions is not supported"); static int const kConvDim = Mma::IteratorA::kConvDim; /// Conv dimension and problem size structure (Conv2d or Conv3d) using ConvProblemSize = ConvProblemSize_; static conv::GroupMode const kGroupMode = conv::GroupMode::kNone; /// Wgrad C stride idx for implicit gemm algorithm // Conv2d row-major matrix C (KxRSC) // Conv3d row-major matrix C (KxTRSC) static int const kWgradCStrideIdx = platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3; /// This chooses the appropriate stride element of the C tensor. static int const kTensorCStrideIdx = (kConvolutionalOperator == conv::Operator::kWgrad ?
kWgradCStrideIdx : 0); // // // using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter< LayoutC, typename Epilogue::OutputTileIterator::Layout, TensorRefC, ConvOperator, ConvProblemSize >; /// Argument structure struct Arguments { // // Data members // ConvProblemSize problem_size; TensorRefA ref_A; TensorRefB ref_B; TensorRefScaleBias ref_scale; TensorRefScaleBias ref_bias; TensorRefC ref_C; TensorRefC ref_D; typename EpilogueOutputOp::Params output_op; SplitKMode split_k_mode; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments() { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size, TensorRefA const & ref_A, TensorRefB const & ref_B, TensorRefScaleBias const & ref_scale, TensorRefScaleBias const & ref_bias, TensorRefC const & ref_C, TensorRefC const & ref_D, typename EpilogueOutputOp::Params const & output_op, SplitKMode const & split_k_mode = SplitKMode::kSerial ): problem_size(problem_size), ref_A(ref_A), ref_B(ref_B), ref_scale(ref_scale), ref_bias(ref_bias), ref_C(ref_C), ref_D(ref_D), output_op(output_op), split_k_mode(split_k_mode) { } }; /// Parameters structure struct Params { ConvProblemSize problem_size; cutlass::gemm::GemmCoord grid_tiled_shape; gemm::GemmCoord implicit_gemm_problem_size; int swizzle_log_tile; int gemm_k_iterations; typename Mma::IteratorA::Params iterator_A; typename Mma::IteratorA::Element const *ptr_A; typename Mma::IteratorB::Params iterator_B; typename Mma::IteratorB::Element const *ptr_B; typename Mma::IteratorScaleBias::Params iterator_scale_bias; typename Mma::IteratorScaleBias::Element const *ptr_scale; typename Mma::IteratorScaleBias::Element const *ptr_bias; typename Epilogue::OutputTileIterator::Params iterator_C; typename Epilogue::OutputTileIterator::Element *ptr_C; typename Epilogue::OutputTileIterator::Params iterator_D; typename Epilogue::OutputTileIterator::Element *ptr_D; typename EpilogueOutputOp::Params output_op; int *semaphore; SplitKMode split_k_mode; // // Methods // CUTLASS_HOST_DEVICE Params(): swizzle_log_tile(0), gemm_k_iterations(0) { } /// CUTLASS_HOST_DEVICE Params( Arguments const &args, int *semaphore = nullptr ): problem_size(args.problem_size), implicit_gemm_problem_size(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)), iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())), ptr_A(args.ref_A.data()), iterator_B(args.problem_size, args.ref_B.layout()), ptr_B(args.ref_B.data()), iterator_scale_bias(args.problem_size, args.ref_scale.layout()), ptr_scale(args.ref_scale.data()), ptr_bias(args.ref_bias.data()), iterator_C(ConvOutputIteratorParameter::layout(args.ref_C)), ptr_C(args.ref_C.data()), iterator_D(ConvOutputIteratorParameter::layout(args.ref_D)), ptr_D(args.ref_D.data()), output_op(args.output_op), semaphore(semaphore), split_k_mode(args.split_k_mode) { gemm_k_iterations = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size); ThreadblockSwizzle threadblock_swizzle; grid_tiled_shape = threadblock_swizzle.get_tiled_shape( implicit_gemm_problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; 
}; // // Methods // CUTLASS_HOST_DEVICE ImplicitGemmConvolutionFusion() { } /// Executes one ImplicitGEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() || params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) { return; } // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A operand typename Mma::IteratorA iterator_A( params.iterator_A, params.problem_size, params.ptr_A, thread_idx, MatrixCoord( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.k() * Mma::Shape::kK ) ); // Construct iterators to B operand typename Mma::IteratorB iterator_B( params.iterator_B, params.problem_size, params.ptr_B, thread_idx, MatrixCoord( threadblock_tile_idx.k() * Mma::Shape::kK, threadblock_tile_idx.n() * Mma::Shape::kN ) ); // Construct iterators to A scale/bias vector typename Mma::IteratorScaleBias iterator_scale_bias( params.iterator_scale_bias, params.problem_size, params.ptr_scale, params.ptr_bias, thread_idx, MatrixCoord( 0, (kConvolutionalOperator == conv::Operator::kFprop) ? (threadblock_tile_idx.k() * Mma::Shape::kK) : // Wgrad (threadblock_tile_idx.n() * Mma::Shape::kN) ) ); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_scale_bias, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // Construct the semaphore. int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m(); Semaphore semaphore(params.semaphore + block_idx, thread_idx); // Compute logical position within grid threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // If performing a reduction via split-K, fetch the initial synchronization if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k()); } MatrixCoord threadblock_offset( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.n() * Mma::Shape::kN ); // Tile iterator writing to destination tensor typename Epilogue::OutputTileIterator iterator_D( params.iterator_D, params.ptr_D, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Tile iterator reading from source accumulator tensor typename Epilogue::OutputTileIterator iterator_C( params.iterator_C, params.ptr_C, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_idx.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_idx.k()); } // Each split-k-slice writes to a unique tensor location else if (params.split_k_mode == SplitKMode::kParallel) { iterator_D.add_pointer_offset(threadblock_tile_idx.k() * cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size)); } // Run efficient epilogue epilogue(output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_idx.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
15,709
C
32.857759
119
0.660704
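The serial split-K path in the epilogue above hands ownership of the output tile from slice k to slice k+1 through a per-tile lock, and the last slice resets it to zero so the next launch starts clean. A sequential host-side sketch of that handshake (plain C++, no real Semaphore or device code involved):

#include <cstdio>

int main() {
  int const kSlices = 4;   // stands in for grid_tiled_shape.k() in the kernel above
  int lock = 0;            // stands in for the device-side semaphore state

  for (int k = 0; k < kSlices; ++k) {
    // semaphore.wait(k): a slice proceeds only after the previous one has released.
    if (lock != k) {
      std::printf("slice %d would block\n", k);
      return 1;
    }
    // Slices after the first read partial sums back from D (iterator_C = iterator_D),
    // and the output op is told its position via set_k_partition(k, kSlices).
    std::printf("slice %d accumulates into D (partition %d of %d)\n", k, k, kSlices);

    // semaphore.release(...): the final slice resets the lock for subsequent grids.
    lock = (k + 1 == kSlices) ? 0 : k + 1;
  }
  std::printf("lock after final slice = %d\n", lock);
  return 0;
}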
NVIDIA/warp/warp/native/cutlass/include/cutlass/conv/kernel/default_conv3d_dgrad.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dDgrad template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized, conv::StrideSupport StrideSupport = StrideSupport::kStrided > struct DefaultConv3dDgrad; /// Defines a kernel for Conv3dDgrad specialization for Analytic IteratorAlgorithm Dgrad Strided // and multistage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv3dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kAnalytic, StrideSupport::kStrided > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dDgradOutputGradientTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kStrided >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dDgradFilterTileAccessIteratorAnalytic< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv3dDgrad specialization for Optimized IteratorAlgorithm Dgrad Strided // and multistage pipeline. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename EpilogueOutputOp, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag > struct DefaultConv3dDgrad < ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, MathOperatorTag, IteratorAlgorithm::kOptimized, StrideSupport::kUnity > { // Define the core components from GEMM using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementAccumulator, layout::RowMajor, OperatorClass, Stages, MathOperatorTag>; // Define iterators over tiles from the A operand using ThreadMapA = typename MmaCore::IteratorThreadMapA; using IteratorA = cutlass::conv::threadblock::Conv3dDgradOutputGradientTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>, ElementA, ThreadMapA, StrideSupport::kUnity >; using SmemIteratorA = typename MmaCore::SmemIteratorA; // Define iterators over tiles from the B operand using ThreadMapB = typename MmaCore::IteratorThreadMapB; using IteratorB = cutlass::conv::threadblock::Conv3dDgradFilterTileAccessIteratorOptimized< cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>, ElementB, ThreadMapB >; using SmemIteratorB = typename MmaCore::SmemIteratorB; // Warp-level GEMM components using WarpMmaTensorOp = typename MmaCore::MmaTensorOp; using MmaPolicy = typename MmaCore::MmaPolicy; // Define the Mma using Mma = threadblock::ImplicitGemmMultistage< ThreadblockShape, IteratorA, SmemIteratorA, arch::CacheOperation::Always, IteratorB, SmemIteratorB, arch::CacheOperation::Global, MmaPolicy, Stages >; // Define the epilogue using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< ThreadblockShape, WarpMmaTensorOp, 1, EpilogueOutputOp, EpilogueOutputOp::kCount >::Epilogue; // Define the kernel using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution< Mma, Epilogue, ThreadblockSwizzle, conv::Operator::kDgrad, Conv3dProblemSize >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
9,324
C
29.674342
100
0.701416
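A hedged sketch of how the Optimized, unit-stride specialization above might be instantiated; the element types, tile shapes, epilogue, swizzle, and stage count below are illustrative choices typical of SM80 tensor-op configurations, not values prescribed by this header:

#include "cutlass/conv/kernel/default_conv3d_dgrad.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"

// Illustrative SM80 configuration (assumed, not mandated by the header above).
using Conv3dDgradKernel = cutlass::conv::kernel::DefaultConv3dDgrad<
    cutlass::half_t, cutlass::layout::TensorNDHWC,     // ElementA, LayoutA (output gradient)
    cutlass::half_t, cutlass::layout::TensorNDHWC,     // ElementB, LayoutB (filter)
    cutlass::half_t, cutlass::layout::TensorNDHWC,     // ElementC, LayoutC (input gradient)
    float,                                             // ElementAccumulator
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<128, 128, 32>,            // threadblock tile
    cutlass::gemm::GemmShape<64, 64, 32>,              // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,               // instruction shape
    cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                                 // Stages
    cutlass::arch::OpMultiplyAdd,
    cutlass::conv::IteratorAlgorithm::kOptimized,
    cutlass::conv::StrideSupport::kUnity               // this path handles unit stride only
>::Kernel;

// The resulting kernel type is typically wrapped by a device-level launcher such as
// cutlass::conv::device::ImplicitGemmConvolution<Conv3dDgradKernel>.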
NVIDIA/warp/warp/native/cutlass/include/cutlass/platform/platform.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /** * \file * \brief C++ features that may be otherwise unimplemented for CUDA device functions. * * This file has three components: * * (1) Macros: * - Empty macro defines for C++ keywords not supported by the current * version of C++. These simply allow compilation to proceed (but do * not provide the added semantics). * - \p noexcept * - \p constexpr * - \p nullptr * - \p static_assert * * - Macro functions that we need in constant expressions because the * C++ equivalents require constexpr compiler support. These are * prefixed with \p __NV_STD_* * - \p __NV_STD_MAX * - \p __NV_STD_MIN * * (2) Re-implementations of STL functions and types: * - C++ features that need the \p __device__ annotation. These are * placed into the \p platform namespace. * - \p abs * - \p plus * - \p less * - \p greater * - \p min * - \p max * - \p methods on std::pair (==, !=, <, <=, >, >=, and make_pair()) * * (3) Stop-gap implementations of unsupported STL functions and types: * - STL functions and types defined by C++ 11/14/17/etc. that are not * provided by the current version of C++. These are placed into the * \p platform namespace * - \p integral_constant * - \p nullptr_t * - \p true_type * - \p false_type * - \p bool_constant * - \p enable_if * - \p conditional * - \p is_same * - \p is_base_of * - \p remove_const * - \p remove_volatile * - \p remove_cv * - \p is_volatile * - \p is_pointer * - \p is_void * - \p is_integral * - \p is_floating_point * - \p is_arithmetic * - \p is_fundamental * - \p is_trivially_copyable * - \p alignment_of * - \p aligned_storage * * (4) Functions and types that are STL-like (but aren't in the STL): * - \p TODO: min and max functors? 
* * The idea is that, as we drop support for older compilers, we can simply #define * the \p __NV_STD_XYZ macros and \p platform namespace to alias their C++ * counterparts (or trivially find-and-replace their occurrences in code text). */ //----------------------------------------------------------------------------- // Dependencies //----------------------------------------------------------------------------- #if defined(__CUDACC_RTC__) #include <cuda/std/cstdint> #else #include <stdint.h> #endif #if !defined(__CUDACC_RTC__) //----------------------------------------------------------------------------- // Include STL files that platform provides functionality for //----------------------------------------------------------------------------- #include <algorithm> // Minimum/maximum operations #include <cstddef> // nullptr_t #include <functional> // Arithmetic operations #include <utility> // For methods on std::pair #if (!defined(_MSC_VER) && (__cplusplus >= 201103L)) || (defined(_MSC_VER) && (_MSC_VER >= 1500)) #include <type_traits> // For integral constants, conditional metaprogramming, and type traits #endif #include "cutlass/cutlass.h" #endif //----------------------------------------------------------------------------- // OS //----------------------------------------------------------------------------- #if (defined(WIN32) || defined(_WIN32) || defined(__WIN32)) && !defined(__CYGWIN__) #define CUTLASS_OS_WINDOWS #endif /****************************************************************************** * Macros ******************************************************************************/ //----------------------------------------------------------------------------- // Keywords //----------------------------------------------------------------------------- /// noexcept, constexpr #if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1900)) #ifndef noexcept #define noexcept #endif #ifndef constexpr #define constexpr #endif #endif /// nullptr #if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1310)) #ifndef nullptr #define nullptr 0 #endif #endif /// static_assert #if (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1600)) #ifndef static_assert #define __platform_cat_(a, b) a##b #define __platform_cat(a, b) __platform_cat_(a, b) #define static_assert(__e, __m) typedef int __platform_cat(AsSeRt, __LINE__)[(__e) ? 1 : -1] #endif #endif //----------------------------------------------------------------------------- // Functions //----------------------------------------------------------------------------- /// Select maximum(a, b) #ifndef __NV_STD_MAX #define __NV_STD_MAX(a, b) (((b) > (a)) ? (b) : (a)) #endif /// Select minimum(a, b) #ifndef __NV_STD_MIN #define __NV_STD_MIN(a, b) (((b) < (a)) ? (b) : (a)) #endif /****************************************************************************** * Re-implementations ******************************************************************************/ namespace cutlass { namespace platform { //----------------------------------------------------------------------------- // Abs operations <algorithm> //----------------------------------------------------------------------------- #if defined(__CUDACC_RTC__) /// std::abs CUTLASS_HOST_DEVICE constexpr int abs(int a) { return (a < 0) ? -a : a; } CUTLASS_HOST_DEVICE constexpr long long abs(long long a) { return (a < 0) ?
-a : a; } #else using std::abs; #endif //----------------------------------------------------------------------------- // Minimum/maximum operations <algorithm> //----------------------------------------------------------------------------- /// std::min template <typename T> CUTLASS_HOST_DEVICE constexpr const T& min(const T& a, const T& b) { return (b < a) ? b : a; } /// std::max template <typename T> CUTLASS_HOST_DEVICE constexpr const T& max(const T& a, const T& b) { return (a < b) ? b : a; } #if !defined(__CUDACC_RTC__) //----------------------------------------------------------------------------- // Methods on std::pair //----------------------------------------------------------------------------- using std::pair; template <class T1, class T2> CUTLASS_HOST_DEVICE constexpr bool operator==(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) { return (lhs.first == rhs.first) && (lhs.second == rhs.second); } template <class T1, class T2> CUTLASS_HOST_DEVICE constexpr bool operator!=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) { return (lhs.first != rhs.first) || (lhs.second != rhs.second); } template <class T1, class T2> CUTLASS_HOST_DEVICE constexpr bool operator<(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) { return (lhs.first < rhs.first) ? true : (rhs.first < lhs.first) ? false : (lhs.second < rhs.second); } template <class T1, class T2> CUTLASS_HOST_DEVICE constexpr bool operator<=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) { return !(rhs < lhs); } template <class T1, class T2> CUTLASS_HOST_DEVICE constexpr bool operator>(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) { return (rhs < lhs); } template <class T1, class T2> CUTLASS_HOST_DEVICE constexpr bool operator>=(const pair<T1, T2>& lhs, const pair<T1, T2>& rhs) { return !(lhs < rhs); } template <class T1, class T2> CUTLASS_HOST_DEVICE std::pair<T1, T2> make_pair(T1 t, T2 u) { std::pair<T1, T2> retval; retval.first = t; retval.second = u; return retval; } #endif } // namespace platform /****************************************************************************** * Implementations of C++ 11/14/17/... STL features ******************************************************************************/ namespace platform { //----------------------------------------------------------------------------- // Integral constant helper types <type_traits> //----------------------------------------------------------------------------- #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) /// std::integral_constant template <typename value_t, value_t V> struct integral_constant; /// std::integral_constant template <typename value_t, value_t V> struct integral_constant { static const value_t value = V; typedef value_t value_type; typedef integral_constant<value_t, V> type; CUTLASS_HOST_DEVICE operator value_type() const { return value; } CUTLASS_HOST_DEVICE const value_type operator()() const { return value; } }; #else using std::integral_constant; using std::pair; #endif /// The type used as a compile-time boolean with true value. typedef integral_constant<bool, true> true_type; /// The type used as a compile-time boolean with false value.
typedef integral_constant<bool, false> false_type; #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus <= 201402L)) || (defined(_MSC_VER) && (_MSC_VER < 1900)) /// std::bool_constant template <bool V> struct bool_constant : platform::integral_constant<bool, V> {}; #else using std::bool_constant; #endif #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1700)) /// std::nullptr_t struct nullptr_t {}; #else using std::nullptr_t; #endif //----------------------------------------------------------------------------- // Conditional metaprogramming <type_traits> //----------------------------------------------------------------------------- #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1600)) /// std::enable_if (true specialization) template <bool C, typename T = void> struct enable_if { typedef T type; }; /// std::enable_if (false specialization) template <typename T> struct enable_if<false, T> {}; /// std::conditional (true specialization) template <bool B, class T, class F> struct conditional { typedef T type; }; /// std::conditional (false specialization) template <class T, class F> struct conditional<false, T, F> { typedef F type; }; #else using std::enable_if; using std::conditional; #endif //----------------------------------------------------------------------------- // Const/volatility specifiers <type_traits> //----------------------------------------------------------------------------- #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) /// std::remove_const (non-const specialization) template <typename T> struct remove_const { typedef T type; }; /// std::remove_const (const specialization) template <typename T> struct remove_const<const T> { typedef T type; }; /// std::remove_volatile (non-volatile specialization) template <typename T> struct remove_volatile { typedef T type; }; /// std::remove_volatile (volatile specialization) template <typename T> struct remove_volatile<volatile T> { typedef T type; }; /// std::remove_cv template <typename T> struct remove_cv { typedef typename remove_volatile<typename remove_const<T>::type>::type type; }; #else using std::remove_const; using std::remove_volatile; using std::remove_cv; #endif //----------------------------------------------------------------------------- // Type relationships <type_traits> //----------------------------------------------------------------------------- #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) /// std::is_same (false specialization) template <typename A, typename B> struct is_same : false_type {}; /// std::is_same (true specialization) template <typename A> struct is_same<A, A> : true_type {}; /// Helper for std::is_base_of template <typename BaseT, typename DerivedT> struct is_base_of_helper { typedef char (&yes)[1]; typedef char (&no)[2]; template <typename B, typename D> struct dummy { CUTLASS_HOST_DEVICE operator B*() const; CUTLASS_HOST_DEVICE operator D*(); }; template <typename T> CUTLASS_HOST_DEVICE static yes check(DerivedT*, T); CUTLASS_HOST_DEVICE static no check(BaseT*, int); static const bool value = sizeof(check(dummy<BaseT, DerivedT>(), int())) == sizeof(yes); }; /// std::is_base_of template <typename BaseT, typename DerivedT> struct is_base_of : integral_constant<bool, (is_base_of_helper<typename 
remove_cv<BaseT>::type, typename remove_cv<DerivedT>::type>::value) || (is_same<typename remove_cv<BaseT>::type, typename remove_cv<DerivedT>::type>::value)> {}; #else using std::is_same; using std::is_base_of; #endif //----------------------------------------------------------------------------- // Type properties <type_traits> //----------------------------------------------------------------------------- #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) /// std::is_volatile template <typename T> struct is_volatile : false_type {}; template <typename T> struct is_volatile<volatile T> : true_type {}; /// Helper for std::is_pointer (false specialization) template <typename T> struct is_pointer_helper : false_type {}; /// Helper for std::is_pointer (true specialization) template <typename T> struct is_pointer_helper<T*> : true_type {}; /// std::is_pointer template <typename T> struct is_pointer : is_pointer_helper<typename remove_cv<T>::type> {}; /// std::is_void template <typename T> struct is_void : is_same<void, typename remove_cv<T>::type> {}; /// std::is_integral template <typename T> struct is_integral : false_type {}; template <> struct is_integral<char> : true_type {}; template <> struct is_integral<signed char> : true_type {}; template <> struct is_integral<unsigned char> : true_type {}; template <> struct is_integral<short> : true_type {}; template <> struct is_integral<unsigned short> : true_type {}; template <> struct is_integral<int> : true_type {}; template <> struct is_integral<unsigned int> : true_type {}; template <> struct is_integral<long> : true_type {}; template <> struct is_integral<unsigned long> : true_type {}; template <> struct is_integral<long long> : true_type {}; template <> struct is_integral<unsigned long long> : true_type {}; template <typename T> struct is_integral<volatile T> : is_integral<T> {}; template <typename T> struct is_integral<const T> : is_integral<T> {}; template <typename T> struct is_integral<const volatile T> : is_integral<T> {}; /// std::is_floating_point template <typename T> struct is_floating_point : integral_constant<bool, (is_same<float, typename remove_cv<T>::type>::value || is_same<double, typename remove_cv<T>::type>::value)> {}; /// std::is_arithmetic template <typename T> struct is_arithmetic : integral_constant<bool, (is_integral<T>::value || is_floating_point<T>::value)> {}; /// std::is_fundamental template <typename T> struct is_fundamental : integral_constant<bool, (is_arithmetic<T>::value || is_void<T>::value || is_same<nullptr_t, typename remove_cv<T>::type>::value)> {}; #else using std::is_volatile; using std::is_pointer; using std::is_void; using std::is_integral; using std::is_floating_point; using std::is_arithmetic; using std::is_fundamental; #endif #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1800)) || \ (defined(__GNUG__) && (__GNUC__ < 5)) /** * std::is_trivially_copyable * * This implementation only evaluates true if T is fundamental or pointer * * Without help from partial template specializations provided by the user for * a specific class or struct, this trait will never report that the specified * class or struct is trivially-copyable ; this is always safe, * if possibly sub-optimal. 
*/ template <typename T> struct is_trivially_copyable : integral_constant<bool, (is_fundamental<T>::value || is_pointer<T>::value)> {}; #else using std::is_trivially_copyable; #endif //----------------------------------------------------------------------------- // bit_cast <bit> //----------------------------------------------------------------------------- template< class To, class From > constexpr To CUTLASS_HOST_DEVICE bit_cast(const From& from ) noexcept; template <class To, class From> constexpr To CUTLASS_HOST_DEVICE bit_cast(const From& src) noexcept { static_assert(sizeof(To) == sizeof(From), "sizes must match"); return reinterpret_cast<To const &>(src); } //----------------------------------------------------------------------------- // Alignment and layout utilities //----------------------------------------------------------------------------- #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1500)) /// std::alignment_of template <typename value_t> struct alignment_of { struct pad { value_t val; char byte; }; enum { value = sizeof(pad) - sizeof(value_t) }; }; #else template <typename value_t> struct alignment_of : std::alignment_of<value_t> {}; #endif /* 16B specializations where 32-bit Win32 host compiler disagrees with device compiler */ template <> struct alignment_of<int4> { enum { value = 16 }; }; template <> struct alignment_of<uint4> { enum { value = 16 }; }; template <> struct alignment_of<float4> { enum { value = 16 }; }; template <> struct alignment_of<long4> { enum { value = 16 }; }; template <> struct alignment_of<ulong4> { enum { value = 16 }; }; template <> struct alignment_of<longlong2> { enum { value = 16 }; }; template <> struct alignment_of<ulonglong2> { enum { value = 16 }; }; template <> struct alignment_of<double2> { enum { value = 16 }; }; template <> struct alignment_of<longlong4> { enum { value = 16 }; }; template <> struct alignment_of<ulonglong4> { enum { value = 16 }; }; template <> struct alignment_of<double4> { enum { value = 16 }; }; // Specializations for volatile/const qualified types template <typename value_t> struct alignment_of<volatile value_t> : alignment_of<value_t> {}; template <typename value_t> struct alignment_of<const value_t> : alignment_of<value_t> {}; template <typename value_t> struct alignment_of<const volatile value_t> : alignment_of<value_t> {}; #if defined(__CUDACC_RTC__) || (!defined(_MSC_VER) && (__cplusplus < 201103L)) || (defined(_MSC_VER) && (_MSC_VER < 1800)) template <size_t Align> struct aligned_chunk; template <> struct __align__(1) aligned_chunk<1> { uint8_t buff; }; template <> struct __align__(2) aligned_chunk<2> { uint16_t buff; }; template <> struct __align__(4) aligned_chunk<4> { uint32_t buff; }; template <> struct __align__(8) aligned_chunk<8> { uint32_t buff[2]; }; template <> struct __align__(16) aligned_chunk<16> { uint32_t buff[4]; }; template <> struct __align__(32) aligned_chunk<32> { uint32_t buff[8]; }; template <> struct __align__(64) aligned_chunk<64> { uint32_t buff[16]; }; template <> struct __align__(128) aligned_chunk<128> { uint32_t buff[32]; }; template <> struct __align__(256) aligned_chunk<256> { uint32_t buff[64]; }; template <> struct __align__(512) aligned_chunk<512> { uint32_t buff[128]; }; template <> struct __align__(1024) aligned_chunk<1024> { uint32_t buff[256]; }; template <> struct __align__(2048) aligned_chunk<2048> { uint32_t buff[512]; }; template <> struct __align__(4096) aligned_chunk<4096> { uint32_t buff[1024]; 
}; /// std::aligned_storage template <size_t Len, size_t Align> struct aligned_storage { typedef aligned_chunk<Align> type[Len / sizeof(aligned_chunk<Align>)]; }; #else using std::aligned_storage; #endif #if !defined(__CUDACC_RTC__) /// Default deleter template <typename T> struct default_delete { void operator()(T* ptr) const { delete ptr; } }; /// Partial specialization for deleting array types template <typename T> struct default_delete<T[]> { void operator()(T* ptr) const { delete[] ptr; } }; /// std::unique_ptr template <class T, class Deleter = default_delete<T> > class unique_ptr { public: typedef T* pointer; typedef T element_type; typedef Deleter deleter_type; private: /// Pointer to memory pointer _ptr; /// Deleter deleter_type _deleter; public: unique_ptr() : _ptr(nullptr) {} unique_ptr(pointer p) : _ptr(p) {} ~unique_ptr() { if (_ptr) { _deleter(_ptr); } } /// Returns a pointer to the managed object or nullptr if no object is owned. pointer get() const noexcept { return _ptr; } /// Releases ownership of the managed object, if any pointer release() noexcept { pointer p(_ptr); _ptr = nullptr; return p; } /// Replaces the managed object, deleting the old object. void reset(pointer p = pointer()) noexcept { pointer old_ptr = _ptr; _ptr = p; if (old_ptr != nullptr) { get_deleter()(old_ptr); } } /// Swaps the managed objects with *this and another unique_ptr void swap(unique_ptr& other) noexcept { std::swap(_ptr, other._ptr); } /// Returns the deleter object Deleter& get_deleter() noexcept { return _deleter; } /// Returns the deleter object Deleter const& get_deleter() const noexcept { return _deleter; } /// Checks whether an object is owned operator bool() const noexcept { return _ptr != nullptr; } /// Dereferences the unique_ptr T& operator*() const { return *_ptr; } /// Returns a pointer to the managed object pointer operator->() const noexcept { return _ptr; } /// Array access to managed object T& operator[](size_t i) const { return _ptr[i]; } }; /// Specializes the swap algorithm template <typename T, typename Deleter> void swap(unique_ptr<T, Deleter>& lhs, unique_ptr<T, Deleter>& rhs) noexcept { lhs.swap(rhs); } #endif /// std::numeric_limits template <class T> struct numeric_limits; template <> struct numeric_limits<int32_t> { CUTLASS_HOST_DEVICE static constexpr int32_t lowest() noexcept { return -2147483647 - 1;} CUTLASS_HOST_DEVICE static constexpr int32_t max() noexcept { return 2147483647;} static constexpr bool is_integer = true; }; template <> struct numeric_limits<int16_t> { CUTLASS_HOST_DEVICE static constexpr int16_t lowest() noexcept { return -32768;} CUTLASS_HOST_DEVICE static constexpr int16_t max() noexcept { return 32767;} static constexpr bool is_integer = true; }; template <> struct numeric_limits<int8_t> { CUTLASS_HOST_DEVICE static constexpr int8_t lowest() noexcept { return -128;} CUTLASS_HOST_DEVICE static constexpr int8_t max() noexcept { return 127;} static constexpr bool is_integer = true; }; template <> struct numeric_limits<uint32_t> { CUTLASS_HOST_DEVICE static constexpr uint32_t lowest() noexcept { return 0;} CUTLASS_HOST_DEVICE static constexpr uint32_t max() noexcept { return 4294967295U;} static constexpr bool is_integer = true; }; template <> struct numeric_limits<uint16_t> { CUTLASS_HOST_DEVICE static constexpr uint16_t lowest() noexcept { return 0;} CUTLASS_HOST_DEVICE static constexpr uint16_t max() noexcept { return 65535U;} static constexpr bool is_integer = true; }; template <> struct numeric_limits<uint8_t> { CUTLASS_HOST_DEVICE static 
constexpr uint8_t lowest() noexcept { return 0;} CUTLASS_HOST_DEVICE static constexpr uint8_t max() noexcept { return 255U;} static constexpr bool is_integer = true; }; template <> struct numeric_limits<float> { CUTLASS_HOST_DEVICE static constexpr float infinity() noexcept { return bit_cast<float, int32_t>(0x7f800000);} static constexpr bool is_integer = false; static constexpr bool has_infinity = true; }; } // namespace platform } // namespace cutlass
26,097
C
28.257848
127
0.579185
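A small host-side sketch exercising the shims defined above; it assumes the CUTLASS include directory is on the path and that, on a modern toolchain, most of these aliases simply forward to their std:: counterparts:

#include <cstdint>
#include "cutlass/platform/platform.h"

// Compile-time checks against the trait and numeric_limits shims above.
static_assert(cutlass::platform::is_same<int, int>::value, "is_same shim");
static_assert(cutlass::platform::is_integral<long long>::value, "is_integral shim");
static_assert(cutlass::platform::numeric_limits<int8_t>::max() == 127, "int8_t max shim");

int main() {
  // bit_cast preserves the bit pattern: 1.0f is 0x3f800000 in IEEE-754.
  return cutlass::platform::bit_cast<uint32_t, float>(1.0f) == 0x3f800000u ? 0 : 1;
}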
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "cutlass/array.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/epilogue/warp/tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element, ///< data type of element to be written typename Layout ///< target shared memory layout > class TileIteratorTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element_ ///< data type of element to be written > class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = layout::RowMajor; using TensorLayout = Layout; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; /// Number of times this iterator can be incremented using TileIterations = typename Policy::TileIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerAccess>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOp(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOp( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / Policy::kElementsPerAccess) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column() / Policy::kElementsPerAccess}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / Policy::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += layout_({ coord_offset.row(), coord_offset.column() / Policy::kElementsPerAccess }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n]; } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { frag_ptr[n] = pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess]; } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator++() { return add_tile_offset({1, 0}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix 
multiply operation shape (concept: gemm::GemmShape) typename Element_, ///< data type of element to be written int InterleavedK ///< number of interleaved k > class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_, layout::ColumnMajorInterleaved<InterleavedK> > { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = layout::ColumnMajorInterleaved<InterleavedK>; using TensorLayout = Layout; ///< shared memory tensor ref layout using TensorRef = TensorRef<Element, TensorLayout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< // Policy::kRowsPerIteration, WarpShape::kM, InterleavedK >; /// This is the fragment size produced by one tile using Fragment = Array< Element, Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction * Policy::kElementsPerIteration>; /// This is the fragment size produced by one iteration // using Fragment = Array< // Element, Policy::kElementsPerIteration >; /// This is the complete warp-level accumulator tile. //using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented using TileIterations = typename Policy::TileIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerIteration>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object TensorLayout layout_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOp(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOp( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerIteration }; pointer_ += (layout_({thread_offset_.row(), thread_offset_.column()}) / Policy::kElementsPerAccess); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / Policy::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += (layout_({ coord_offset.row(), coord_offset.column() }) / Policy::kElementsPerAccess); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int 
n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) { AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int a = 0; a < Policy::kAccessPerIteration; ++a) { ptr[a + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n * Policy::kAccessPerIteration + a]; // printf("store thread %d, address %p, bank %ld\n", threadIdx.x, pointer_+a+n*Detail::kLanesInQuad, // ((long long)(pointer_+a+n*Detail::kLanesInQuad)>>2)&0x1f); } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) { AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int a = 0; a < Policy::kAccessPerIteration; ++a) { frag_ptr[n * Policy::kAccessPerIteration + a] = ptr[a + pointer_offset / Policy::kElementsPerAccess]; } } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator++() { return add_tile_offset({0, 1}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element_, ///< data type of element to be written typename Layout_ > class TileIteratorTensorOpCanonical { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = Layout_; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; static int const kAccessSize = 1; static int const kAccessCount = Policy::kElementsPerAccess / kAccessSize; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerAccess>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, kAccessSize>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Guard to indicate whether the shape is divisible bool divisible_; /// Extent of the output tensor MatrixCoord extent_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]), divisible_(true), extent_(WarpShape::kM, WarpShape::kN) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column()}); } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical( TensorRef const &ref, TensorCoord const &extent, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]), divisible_(false), extent_(extent) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column()}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += layout_({ coord_offset.row(), coord_offset.column() }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { CUTLASS_PRAGMA_UNROLL for (int a = 0; a < kAccessCount; ++a) { int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a; int frag_idx = n * kAccessCount + a; int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a; if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) { pointer_[ptr_idx] = frag_ptr[frag_idx]; } } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType 
*frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { CUTLASS_PRAGMA_UNROLL for (int a = 0; a < kAccessCount; ++a) { int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a; int frag_idx = n * kAccessCount + a; int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a; if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) { frag_ptr[frag_idx] = pointer_[ptr_idx]; } } } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & operator++() { return add_tile_offset({1, 0}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
20,290
C
29.19494
109
0.652785
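The TileIteratorTensorOpCanonical above guards every scalar store and load with a bounds predicate (divisible_ || (row < extent.row() && col < extent.column())) after mapping each lane to a (row, column) starting offset through its quad. The following standalone sketch mirrors that lane mapping and predicate on plain integers; kElementsPerAccess and the extent are assumed example values, and only the n = 0 column group of the guarded loop is shown.

// Standalone sketch (not part of CUTLASS): lane -> (row, column) mapping and the
// bounds predicate used by TileIteratorTensorOpCanonical's store path.
#include <cstdio>

int main() {
    const int kLanesInQuad = 4;
    const int kElementsPerAccess = 2;                  // assumed; Policy-dependent in CUTLASS
    const int kAccessCount = kElementsPerAccess / 1;   // kAccessSize == 1 in the iterator
    const int extent_row = 7, extent_col = 30;         // assumed non-divisible extent

    for (int lane_id = 0; lane_id < 32; ++lane_id) {
        int quad_id = lane_id / kLanesInQuad;
        int lane_in_quad = lane_id % kLanesInQuad;
        int row = quad_id;                                 // thread_offset_.row()
        int base_col = lane_in_quad * kElementsPerAccess;  // thread_offset_.column()

        // One (n = 0) column group of the guarded store loop.
        for (int a = 0; a < kAccessCount; ++a) {
            int col = base_col + a;
            bool in_bounds = (row < extent_row) && (col < extent_col);
            printf("lane %2d -> (%d,%2d) %s\n", lane_id, row, col,
                   in_bounds ? "store" : "skip");
        }
    }
    return 0;
}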
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_simt.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile that participate in one warp-level store operation. Typically, the accumulator tile is the largest single block of register-backed storage within the kernel. Storing it to memory is best accomplished by partitioning it into smaller tiles and storing these sequentially. Round trips through shared memory during the Epilogue phase require partitioning, as shared memory capacity is typically insufficient for a threadblock's total accumulator size. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/epilogue/warp/simt_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Fragment iterator for SIMT accumulator arrangements template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename Operator, ///< matrix multiply operation (concept: arch::Mma) typename Layout, ///< target shared memory layout typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class FragmentIteratorSimt; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_, ///< shape of the warp-level GEMM tile typename Operator_ , ///< matrix multiply operator (concept: arch::Mma) typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class FragmentIteratorSimt<WarpShape_, Operator_, layout::RowMajor, MmaSimtPolicy_> { public: using WarpShape = WarpShape_; using Operator = Operator_; using Layout = layout::RowMajor; /// Policy for warp-level epilogue components using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< typename Operator::ElementC, Policy::kElementsPerIteration>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = Array< typename Operator::ElementC, Policy::kAccumulatorElementCount>; using OutputAccumulatorTile = AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; private: /// Internal access type using AccessType = Array<typename Operator::ElementC, Policy::kElementsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorSimt(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorSimt &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorSimt &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::kAccessesPerIteration; ++n) { int accumulator_access_offset = index_ * Policy::kAccessesPerIteration + n; frag_ptr[n] = accumulators_[accumulator_access_offset]; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
5,880
C
34.642424
100
0.644728
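FragmentIteratorSimt::load() slices the register-backed accumulator tile one iteration at a time using the offset index_ * kAccessesPerIteration + n. The sketch below reproduces that indexing on a plain array so the per-iteration slices are visible; the three policy constants are assumed example values standing in for SimtPolicy.

// Standalone sketch (not the CUTLASS API): the indexing performed by
// FragmentIteratorSimt::operator++() and load(), shown on a flat array.
#include <cstdio>
#include <vector>

int main() {
    const int kIterations = 4;            // assumed
    const int kAccessesPerIteration = 2;  // assumed
    const int kElementsPerAccess = 4;     // assumed

    // "Accumulator tile": one distinct value per element so slices are easy to see.
    std::vector<float> accum(kIterations * kAccessesPerIteration * kElementsPerAccess);
    for (size_t i = 0; i < accum.size(); ++i) accum[i] = float(i);

    for (int index = 0; index < kIterations; ++index) {        // ++iterator
        printf("iteration %d:", index);
        for (int n = 0; n < kAccessesPerIteration; ++n) {      // load()
            int access_offset = index * kAccessesPerIteration + n;
            const float *access = &accum[access_offset * kElementsPerAccess];
            for (int e = 0; e < kElementsPerAccess; ++e) printf(" %g", access[e]);
        }
        printf("\n");
    }
    return 0;
}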
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/volta_tensor_op_policy.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue. These quantities assume a 'column-major' arrangement of TensorOp instructions, of which a row-oriented slice is visible per iteration. 
*/ #pragma once #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Policy details related to the epilogue template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape) typename ElementC, ///< Accumulator layout typename Layout ///< target shared memory layout > struct VoltaTensorOpPolicy; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape_ ///< shape of warp-level GEMM (concept: GemmShape) > struct VoltaTensorOpPolicy<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> { using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = half_t; using Layout = layout::RowMajor; /// Shape of one warp-levelinstruction using InstructionShape = gemm::GemmShape<16, 16, 4>; /// Number of mma operations performed for one 32x32x4 interleaved tile using MmaIterations = MatrixShape< InterleavedTileShape::kM / InstructionShape::kM, InterleavedTileShape::kN / InstructionShape::kN >; /// Number of 32x32x4 interleaved tiles performed to cover the warp-level GEMM shape using TileIterations = MatrixShape< WarpShape::kM / InterleavedTileShape::kM, WarpShape::kN / InterleavedTileShape::kN >; /// Number of accumulator elements owned by each thread per Mma static int const kElementsPerMma = 8; static int const kRowsPerIteration = 16; // // Hard-coded constants regarding Tensor Operations // /// Number of accumulator elements stored per memory instruction to shared memory static int const kElementsPerAccess = 4; /// Number of accesses performed per interleaved tile static int const kAccessesPerInterleavedTile = 4; /// Total number of iterations needed to cover the entire tile static int const kIterations = TileIterations::kRow * 2; // // Derived types // /// Array type for aligned memory accesses using AccessType = AlignedArray<ElementC, kElementsPerAccess>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< ElementC, kElementsPerAccess * kAccessesPerInterleavedTile * TileIterations::kColumn>; /// This is the complete warp-level accumulator tile. 
using AccumulatorTile = Array< ElementC, TileIterations::kCount * MmaIterations::kCount * kElementsPerMma>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > struct VoltaTensorOpPolicy<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> { using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = float; using Layout = layout::RowMajor; /// Shape of one warp-levelinstruction using InstructionShape = gemm::GemmShape<16, 16, 4>; /// Number of mma operations performed for one 32x32x4 interleaved tile using MmaIterations = MatrixShape< InterleavedTileShape::kM / InstructionShape::kM, InterleavedTileShape::kN / InstructionShape::kN >; /// Number of 32x32x4 interleaved tiles performed to cover the warp-level GEMM shape using TileIterations = MatrixShape< WarpShape::kM / InterleavedTileShape::kM, WarpShape::kN / InterleavedTileShape::kN >; /// Number of accumulator elements owned by each thread per Mma static int const kElementsPerMma = 8; static int const kRowsPerIteration = 16; // // Hard-coded constants regarding Tensor Operations // /// Number of accumulator elements stored per memory instruction to shared memory static int const kElementsPerAccess = 2; /// Number of accesses performed per interleaved tile static int const kAccessesPerInterleavedTile = 8; /// Number of rows per interleaved tile static int const kRowsPerMmaTile = 2; /// Total number of iterations needed to cover the entire tile static int const kIterations = TileIterations::kRow * MmaIterations::kRow; // // Derived types // /// Array type for aligned memory accesses using AccessType = AlignedArray<ElementC, kElementsPerAccess>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< ElementC, kElementsPerAccess * kAccessesPerInterleavedTile * TileIterations::kColumn>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = Array< ElementC, TileIterations::kCount * MmaIterations::kCount * kElementsPerMma>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
7,485
C
37.193877
110
0.661323
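The derived quantities in VoltaTensorOpPolicy are pure compile-time arithmetic over the warp, interleaved-tile, and instruction shapes. The sketch below works that arithmetic out for an assumed 64x64 warp tile, using the constants of the two row-major specializations above; it uses no CUTLASS headers.

// Standalone sketch: the compile-time arithmetic behind VoltaTensorOpPolicy
// for a hypothetical 64x64 warp tile.
#include <cstdio>

int main() {
    const int kWarpM = 64, kWarpN = 64;   // assumed warp-level GEMM shape
    const int kTileM = 32, kTileN = 32;   // InterleavedTileShape 32x32x4
    const int kInstM = 16, kInstN = 16;   // InstructionShape 16x16x4
    const int kElementsPerMma = 8;

    int mma_iter_row  = kTileM / kInstM;  // MmaIterations  = 2 x 2
    int mma_iter_col  = kTileN / kInstN;
    int tile_iter_row = kWarpM / kTileM;  // TileIterations = 2 x 2
    int tile_iter_col = kWarpN / kTileN;

    int accum_elements = tile_iter_row * tile_iter_col *
                         mma_iter_row * mma_iter_col * kElementsPerMma;

    // half_t specialization: kIterations = TileIterations::kRow * 2
    int iterations_f16 = tile_iter_row * 2;
    // float specialization: kIterations = TileIterations::kRow * MmaIterations::kRow
    int iterations_f32 = tile_iter_row * mma_iter_row;

    printf("AccumulatorTile elements per thread: %d\n", accum_elements);
    printf("kIterations (half_t): %d, kIterations (float): %d\n",
           iterations_f16, iterations_f32);
    return 0;
}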
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile that participate in one warp-level store operation. Typically, the accumulator tile is the largest single block of register-backed storage within the kernel. Storing it to memory is best accomplished by partitioning it into smaller tiles and storing these sequentially. Round trips through shared memory during the Epilogue phase require partitioning, as shared memory capacity is typically insufficient for a threadblock's total accumulator size. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/epilogue/warp/tensor_op_policy.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { //////////////////////////////////////////////////////////////////////////////// /// template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorElementC, ///< matrix multiply operation data type (concept: data type) typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: Array) typename Layout ///< target shared memory layout > class FragmentIteratorComplexTensorOp; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_, ///< shape of the warp-level GEMM tile typename OperatorShape_, ///< underlying real-valued matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorElementC_, ///< underlying real-valued matrix multiply operation data type typename OperatorFragmentC_ ///< underlying real-valued matrix multiply operation fragment (concept: Array) > class FragmentIteratorComplexTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using OperatorElementC = OperatorElementC_; using OperatorFragmentC = OperatorFragmentC_; using Layout = layout::RowMajor; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< complex<OperatorElementC>, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; static int const kRealIndex = 0; /// Offset into the accumulator fragment static int const kImaginaryIndex = OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn; /// This is the complete warp-level accumulator tile. using AccumulatorTile = Array<OperatorElementC, 2 * kImaginaryIndex>; /// This is the complete warp-level accumulator tile. 
using OutputAccumulatorTile = Array<complex<OperatorElementC>, kImaginaryIndex>; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; private: /// Internal access type using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>; using FragmentAccessType = Array<complex<OperatorElementC>, Policy::kElementsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorComplexTensorOp(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorComplexTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorComplexTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { int index = index_ + index_offset; FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { int accumulator_access_offset = index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess; auto const & real_accum_array = accumulators_[accumulator_access_offset + kRealIndex]; auto const & imag_accum_array = accumulators_[accumulator_access_offset + kImaginaryIndex / Policy::kElementsPerAccess]; // Pack real and imaginary parts into a structure. This is likely to result in MOVs CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Policy::kElementsPerAccess; ++i) { frag_ptr[n][i].real() = real_accum_array[i]; frag_ptr[n][i].imag() = imag_accum_array[i]; } } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
7,055
C
36.531915
126
0.672998
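FragmentIteratorComplexTensorOp keeps the accumulator in planar form (all real parts first, then all imaginary parts starting at kImaginaryIndex) and interleaves them into complex fragments in load(). The sketch below reproduces that packing for the first iteration (index 0) on plain arrays; the element counts are assumed example values, not the policy-derived ones.

// Standalone sketch (not the CUTLASS type): planar-to-interleaved packing as in
// FragmentIteratorComplexTensorOp::load(), simplified to iteration index 0.
#include <complex>
#include <cstdio>
#include <vector>

int main() {
    const int kElementsPerAccess = 4;     // assumed
    const int kAccessesPerFragment = 2;   // stands in for OperatorCount::kColumn
    const int kImaginaryIndex = kElementsPerAccess * kAccessesPerFragment; // planar offset

    // Planar accumulator: [ real..., imag... ]
    std::vector<float> accum(2 * kImaginaryIndex);
    for (int i = 0; i < kImaginaryIndex; ++i) {
        accum[i] = float(i);                      // real parts
        accum[kImaginaryIndex + i] = -float(i);   // imaginary parts
    }

    // Pack one fragment of complex values, as the inner loop of load() does.
    std::vector<std::complex<float>> frag(kImaginaryIndex);
    for (int n = 0; n < kAccessesPerFragment; ++n) {
        const float *real_access = &accum[n * kElementsPerAccess];
        const float *imag_access = &accum[kImaginaryIndex + n * kElementsPerAccess];
        for (int i = 0; i < kElementsPerAccess; ++i) {
            frag[n * kElementsPerAccess + i] = {real_access[i], imag_access[i]};
        }
    }

    for (const auto &c : frag) printf("(%g, %g) ", c.real(), c.imag());
    printf("\n");
    return 0;
}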
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/epilogue/warp/tensor_op_policy.h" #include "cutlass/epilogue/warp/volta_tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape) typename ElementC, ///< Accumulator layout typename Layout ///< target shared memory layout > struct TileIteratorVoltaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> { public: using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using Element = half_t; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// Array type for aligned memory accesses using AccessType = typename Policy::AccessType; /// This is the fragment size produced by one access of the iterator. using Fragment = typename Policy::Fragment; /// This is the complete warp-level accumulator tile. 
using AccumulatorTile = typename Policy::AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; /// Number of elements per access static int const kElementsPerAccess = Policy::kElementsPerAccess; // Internal constants struct Detail { static int const kLanesInQuad = 4; static int const kRowsPerQuad = 4; static int const kColumnsPerQuad = 8; static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess; static int const kAccessQuadDelta = 16; }; /// Padding quantity using Padding = MatrixShape< 0, Policy::kElementsPerAccess>; private: // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorVoltaTensorOp(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_DEVICE TileIteratorVoltaTensorOp( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / Policy::kElementsPerAccess) { int quad_id = lane_id / Detail::kLanesInQuad; int lane_in_quad = (lane_id % Detail::kLanesInQuad); int quad_row_idx = ((quad_id & 4) >> 1) + (quad_id & 1); int quad_col_idx = ((quad_id & 2) >> 1); int row = quad_row_idx * Detail::kRowsPerQuad + lane_in_quad; int column = quad_col_idx * Detail::kColumnsPerQuad; pointer_ += layout_({row, column / kElementsPerAccess}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / Policy::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) { pointer_ += layout_({ tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess}); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) { CUTLASS_PRAGMA_UNROLL for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) { int access_quad = access_idx / 2; int access = access_idx % 2; int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess + access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess + access + pointer_offset / Policy::kElementsPerAccess; int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx; AccessType access_vector = frag_ptr[frag_idx]; pointer_[ptr_offset] = access_vector; } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) { CUTLASS_PRAGMA_UNROLL for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) { int access_quad = access_idx / 2; int access = 
access_idx % 2; int ptr_offset = tile_idx * Detail::kTileDelta + access_quad * Detail::kAccessQuadDelta + access + pointer_offset / Policy::kElementsPerAccess; int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx; frag_ptr[frag_idx] = pointer_[ptr_offset]; } } } /// Load CUTLASS_HOST_DEVICE void load(Fragment const &frag) { load_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> { public: using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using Element = float; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// Array type for aligned memory accesses using AccessType = typename Policy::AccessType; /// This is the fragment size produced by one access of the iterator. using Fragment = typename Policy::Fragment; /// This is the complete warp-level accumulator tile. using AccumulatorTile = typename Policy::AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; /// Number of elements per access static int const kElementsPerAccess = Policy::kElementsPerAccess; // Internal constants struct Detail { static int const kLanesInQuad = 4; static int const kRowsPerQuad = 4; static int const kColumnsPerQuad = 8; static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess; static int const kAccessQuadDelta = 16; }; /// Padding quantity using Padding = MatrixShape< 0, Policy::kElementsPerAccess>; private: // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorVoltaTensorOp(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_DEVICE TileIteratorVoltaTensorOp( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / Policy::kElementsPerAccess) { int quad_id = lane_id / Detail::kLanesInQuad; int lane_in_quad = (lane_id % Detail::kLanesInQuad); int const kQuadRowDelta = 4; int const kQuadColumnDelta = 2 * Policy::MmaIterations::kColumn; int quad_row_offset = ((quad_id & 4) / 2 + (quad_id & 1)) * kQuadRowDelta; int quad_column_offset = (quad_id & 2) / 2 * kQuadColumnDelta; int thread_row_offset = (lane_in_quad & 1); int thread_column_offset = (lane_in_quad & 2) / 2; int row = quad_row_offset + thread_row_offset; int column = quad_column_offset + thread_column_offset; pointer_ += layout_({row, column}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / Policy::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along 
the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) { pointer_ += layout_({ tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess}); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); int const kAccessesPerRow = Policy::TileIterations::kColumn * Policy::MmaIterations::kColumn * 2; CUTLASS_PRAGMA_UNROLL for (int row_idx = 0; row_idx < Policy::kRowsPerMmaTile; ++row_idx) { CUTLASS_PRAGMA_UNROLL for (int access_idx = 0; access_idx < kAccessesPerRow; ++access_idx) { int frag_idx = row_idx * kAccessesPerRow + access_idx; int ptr_column_offset = (access_idx & 1) * 2 + (access_idx & 2) * Policy::MmaIterations::kColumn * 2 + (access_idx & 4) * Policy::MmaIterations::kColumn * 2; int ptr_row_offset = row_idx * 2; int ptr_offset = layout_({ptr_row_offset, ptr_column_offset}) + pointer_offset / Policy::kElementsPerAccess; pointer_[ptr_offset] = frag_ptr[frag_idx]; } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); assert(0); // TODO } /// Load CUTLASS_HOST_DEVICE void load(Fragment const &frag) { load_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
14,258
C
31.333333
116
0.655842
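The half_t TileIteratorVoltaTensorOp constructor places each lane at a (row, column) offset computed from bit fields of its quad index. The sketch below evaluates exactly that expression for all 32 lanes of a warp, using the constants from the Detail struct above; it needs no CUTLASS headers.

// Standalone sketch: lane -> (row, column) placement computed in the half_t
// TileIteratorVoltaTensorOp constructor.
#include <cstdio>

int main() {
    const int kLanesInQuad = 4;
    const int kRowsPerQuad = 4;
    const int kColumnsPerQuad = 8;

    for (int lane_id = 0; lane_id < 32; ++lane_id) {
        int quad_id = lane_id / kLanesInQuad;
        int lane_in_quad = lane_id % kLanesInQuad;

        int quad_row_idx = ((quad_id & 4) >> 1) + (quad_id & 1);
        int quad_col_idx = (quad_id & 2) >> 1;

        int row = quad_row_idx * kRowsPerQuad + lane_in_quad;
        int column = quad_col_idx * kColumnsPerQuad;

        printf("lane %2d -> row %2d, column %2d\n", lane_id, row, column);
    }
    return 0;
}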
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/wmma_tensor_op_policy.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue. These quantities assume a 'column-major' arrangement of TensorOp instructions, of which a row-oriented slice is visible per iteration. 
*/ #pragma once #include "cutlass/arch/wmma.h" #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { //////////////////////////////////////////////////////////////////////////////// /// Policy details related to the epilogue template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm:GemmShape) typename Layout ///< target shared memory layout > struct WmmaTensorOpPolicy; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape ///< matrix multiply operation shape (concept: gemm::GemmShape) > struct WmmaTensorOpPolicy<WarpShape, OperatorShape, layout::RowMajor> { /// Number of operations using OperatorCount = MatrixShape< WarpShape::kM / OperatorShape::kM, WarpShape::kN / OperatorShape::kN >; // // Hard-coded constants regarding Tensor Operations // static int const kElementsPerAccess = 2; static int const kRowsPerIteration = OperatorShape::kM; static int const kWmmaFragmentsPerAccess = 1; // // Derived quantities // // Number of externally visible iterations static int const kIterations = OperatorCount::kRow; }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass //////////////////////////////////////////////////////////////////////////////// #endif
3,916
C
37.40196
100
0.632022
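WmmaTensorOpPolicy derives its counts directly from the ratio of the warp shape to the WMMA operator shape. The sketch below shows that arithmetic for an assumed 64x64 warp tile and a 16x16x16 operator; both shapes are illustrative choices, not values fixed by the policy.

// Standalone sketch: OperatorCount / kIterations arithmetic of WmmaTensorOpPolicy.
#include <cstdio>

int main() {
    const int kWarpM = 64, kWarpN = 64;   // assumed warp-level GEMM shape
    const int kOpM = 16, kOpN = 16;       // assumed WMMA operator shape

    int op_count_row = kWarpM / kOpM;     // OperatorCount::kRow    = 4
    int op_count_col = kWarpN / kOpN;     // OperatorCount::kColumn = 4
    int iterations   = op_count_row;      // kIterations: one row of operators per pass

    printf("OperatorCount = %d x %d, kIterations = %d\n",
           op_count_row, op_count_col, iterations);
    return 0;
}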
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #if !(defined(__clang__) && defined(__CUDA__)) #include "cutlass/cutlass.h" #include "cutlass/wmma_array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/warp/wmma_tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorFragment, ///< wmma fragment to be written (concept: nvcuda::wmma::fragment) typename Layout ///< target shared memory layout > class TileIteratorWmmaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorFragment_ ///< wmma fragment to be written (concept: nvcuda::wmma::fragment) > class TileIteratorWmmaTensorOp<WarpShape_, OperatorShape_, OperatorFragment_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using OperatorFragment = OperatorFragment_; using Layout = layout::RowMajor; // // Derived types // using WmmaDataType = typename OperatorFragment::element_type; using Element = typename cutlass::arch::WmmaToCutlassDataType<WmmaDataType>::Type; ///< Data Type of element stored in nvcuda::wmma::frament using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = WmmaTensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = WmmaFragmentArray<OperatorFragment, Policy::OperatorCount::kColumn * Policy::kWmmaFragmentsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Padding quantity // (Epilogue shared memory padding for WMMA Gemm kernel is set to run optimaly on Turing) using Padding = MatrixShape< 0, 4 * Policy::kElementsPerAccess >; private: /// Storage type for accessing memory //using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to shared memory TensorRef ref_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp(): ref_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp( TensorRef const &ref, unsigned lane_id ): ref_(ref) { } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp & add_pointer_offset(Index pointer_offset) { ref_.add_pointer_offset(pointer_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp & add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset({tile_offset.row() * OperatorShape::kM, tile_offset.column() * WarpShape::kN}); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorWmmaTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { for(int n=0; n < Policy::OperatorCount::kColumn; n++) { WmmaDataType* ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({0, n * OperatorShape::kN}) + pointer_offset); nvcuda::wmma::store_matrix_sync( ptr, frag[n], ref_.stride()[0], nvcuda::wmma::layout_t::mem_row_major ); } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { for(int n=0; n < Policy::OperatorCount::kColumn; n++) { WmmaDataType* ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({0, n * OperatorShape::kN}) + pointer_offset); nvcuda::wmma::load_matrix_sync( frag[n], ptr, ref_.stride()[0], nvcuda::wmma::layout_t::mem_row_major ); } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////// #endif // !defined(__clang__)
7,704
C
32.79386
151
0.635254
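TileIteratorWmmaTensorOp::store_with_pointer_offset() issues one nvcuda::wmma::store_matrix_sync per column of operators. The CUDA sketch below (sm_70 or newer) exercises that underlying primitive directly for a single 16x16 accumulator fragment written row-major; it illustrates only the wmma store the iterator wraps and is not the CUTLASS iterator itself.

// Standalone CUDA sketch of the wmma store primitive used by the iterator.
#include <cstdio>
#include <cuda_runtime.h>
#include <mma.h>

using namespace nvcuda;

__global__ void store_one_tile(float *out, int ldm) {
    // One warp fills a 16x16x16 accumulator fragment and writes it row-major,
    // as store_with_pointer_offset() does per operator column.
    wmma::fragment<wmma::accumulator, 16, 16, 16, float> acc;
    wmma::fill_fragment(acc, 1.0f);
    wmma::store_matrix_sync(out, acc, ldm, wmma::mem_row_major);
}

int main() {
    const int ldm = 16;
    float *d_out = nullptr;
    cudaMalloc(&d_out, 16 * ldm * sizeof(float));

    store_one_tile<<<1, 32>>>(d_out, ldm);   // exactly one warp
    cudaDeviceSynchronize();

    float h_out[16 * 16];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("out[0] = %g, out[255] = %g\n", h_out[0], h_out[255]);

    cudaFree(d_out);
    return 0;
}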
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_simt.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/epilogue/warp/simt_policy.h" #define CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES 1 ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename Operator, ///< matrix multiply operation (concept: arch::Mma) typename Element, ///< data type of element to be written typename Layout, ///< target shared memory layout typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class TileIteratorSimt; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename Operator_, ///< matrix multiply operation (concept: arch::Mma) typename Element_, ///< data type of element to be written typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class TileIteratorSimt<WarpShape_, Operator_, Element_, layout::RowMajor, MmaSimtPolicy_> { public: using WarpShape = WarpShape_; using Operator = Operator_; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< typename Operator::ElementC, Policy::kElementsPerIteration>; /// This is the complete warp-level accumulator tile. 
using AccumulatorTile = Array< typename Operator::ElementC, Policy::kAccumulatorElementCount>; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; /// Padding quantity using Padding = MatrixShape< 0, 4 * Policy::kElementsPerAccess #if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES + 1 #endif >; private: #if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES /// Storage type for accessing memory using AccessType = AlignedArray< Element, 1 >; #else /// Storage type for accessing memory using AccessType = AlignedArray< Element, Policy::kElementsPerAccess >; #endif // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorSimt(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorSimt( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / AccessType::kElements) { auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id); pointer_ += layout_({ lane_offset.row(), lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements) }); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorSimt & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / AccessType::kElements; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorSimt & add_tile_offset(TensorCoord const &tile_offset) { pointer_ += layout_({ tile_offset.row() * Shape::kRow, (tile_offset.column() * Shape::kColumn / int(AccessType::kElements)) }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorSimt & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { #if CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES // de-vectorized stores using ScalarAccessType = AlignedArray<Element, 1>; ScalarAccessType const *scalarFragPtr = reinterpret_cast<ScalarAccessType const *>(&frag); ScalarAccessType *scalarPointer = reinterpret_cast<ScalarAccessType *>(pointer_) + pointer_offset; CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::kAccessesPerIteration; ++n) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::kElementsPerAccess; s++) { scalarPointer[n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s] = scalarFragPtr[n * Policy::kElementsPerAccess + s]; } } #else // original vector stores AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::kAccessesPerIteration; ++n) { pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)] = frag_ptr[n]; } #endif } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::kAccessesPerIteration; ++n) { frag_ptr[n] = pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)]; } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) 
const { load_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template <typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename Operator_, ///< matrix multiply operation (concept: arch::Mma) typename Element_, ///< data type of element to be written typename Layout_, ///< target shared memory layout typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class TileIteratorSimtDirectConv { public: using WarpShape = WarpShape_; using Operator = Operator_; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>; /// Shape of the tile in memory using Shape = MatrixShape<Policy::kRowsPerIteration, WarpShape::kN>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array<typename Operator::ElementC, Policy::kElementsPerIteration>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = Array<typename Operator::ElementC, Policy::kAccumulatorElementCount>; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; /// Padding quantity using Padding = MatrixShape<0, 0 >; private: /// Storage type for accessing memory using AccessType = AlignedArray< Element, Policy::kElementsPerAccess >; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Base smem offset; Index base_smem_address_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorSimtDirectConv() : pointer_(nullptr) {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorSimtDirectConv( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / AccessType::kElements) { auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id); pointer_ += layout_({ lane_offset.row(), lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements) }); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorSimtDirectConv & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / AccessType::kElements; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorSimtDirectConv & add_tile_offset(TensorCoord const &tile_offset) { pointer_ += layout_({ tile_offset.row() * Shape::kRow, (tile_offset.column() * Shape::kColumn / int(AccessType::kElements)) }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorSimtDirectConv & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { // original vector stores AccessType 
const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType * load_pointer_ = reinterpret_cast<AccessType *>(reinterpret_cast<uint8_t *>(pointer_) + base_smem_address_); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::kAccessesPerIteration; ++n) { load_pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)] = frag_ptr[n]; } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::kAccessesPerIteration; ++n) { frag_ptr[n] = pointer_[n * Policy::MmaSimtPolicy::WarpShape::kColumn + pointer_offset / int(AccessType::kElements)]; } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address){ base_smem_address_ = address; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template <typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename ThreadOutputShape_, /// Size of the matrix to load (concept: TensorNHWC) typename ThreadBlockOutputShape_, /// Size of the matrix to load (concept: TensorNHWC) typename Operator_, ///< matrix multi ply operation (concept: arch::Mma) typename Element_, ///< data type of element to be written typename Layout_, ///< target shared memory layout typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class TileIteratorSimtDirect2dConv { public: using WarpShape = WarpShape_; using ThreadOutputShape = ThreadOutputShape_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; using Operator = Operator_; using Element = Element_; using Layout = layout::RowMajor; using MmaSimtPolicy = MmaSimtPolicy_; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; // Thread-level shape of a fragment using ThreadShape = MatrixShape<ThreadOutputShape::kNHW, ThreadOutputShape::kC>; static_assert(!(ThreadShape::kColumn % MmaSimtPolicy::LaneMmaShape::kN), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); using ThreadTileCount = MatrixShape<ThreadBlockOutputShape::kH / ThreadOutputShape::kH, ThreadBlockOutputShape::kW / ThreadOutputShape::kW>; using Iterations = MatrixShape<ThreadShape::kRow, ThreadShape::kColumn / MmaSimtPolicy::LaneMmaShape::kN>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = typename Operator::FragmentC; /// This is the fragment size produced by one access of the iterator. 
using Fragment = AccumulatorTile; /// Padding quantity using Padding = MatrixShape<0, 0>; private: // Storage type for accessing memory using AccessType = AlignedArray<Element, MmaSimtPolicy::LaneMmaShape::kN>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Base smem offset; Index base_smem_address_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorSimtDirect2dConv() : pointer_(nullptr) {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorSimtDirect2dConv(TensorRef const &ref, unsigned thread_id, unsigned lane_id) : pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / AccessType::kElements) { auto lane_layout = MmaSimtPolicy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id); // Get base HW offset of current threads const int threadgroup = thread_id / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC); const int base_p = (threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH; const int base_q = (threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW; const int row_offset = base_p * ThreadBlockOutputShape::kW + base_q; pointer_ += layout_( {row_offset, lane_offset.column() * MmaSimtPolicy::LaneMmaShape::kN / int(AccessType::kElements)}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorSimtDirect2dConv &add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / AccessType::kElements; return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType *storer_pointer_ = reinterpret_cast<AccessType *>(reinterpret_cast<uint8_t *>(pointer_) + base_smem_address_); AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int h = 0; h < ThreadOutputShape::kH; ++h) { CUTLASS_PRAGMA_UNROLL for (int w = 0; w < ThreadOutputShape::kW; ++w) { CUTLASS_PRAGMA_UNROLL for (int col = 0; col < Iterations::kColumn; ++col) { int offset = (w + h * ThreadBlockOutputShape::kW) * (ThreadBlockOutputShape::kC / AccessType::kElements) + col; storer_pointer_[offset + pointer_offset / int(AccessType::kElements)] = frag_ptr[w + h * ThreadOutputShape::kW + col]; } } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { base_smem_address_ = address; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename Operator_, ///< matrix multiply operation (concept: arch::Mma) typename Element_, ///< data type of element to be written typename Layout_, ///< target shared memory layout typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy) > class TileIteratorSimtCanonical { public: using WarpShape = WarpShape_; using Operator = Operator_; using Element = Element_; using Layout = Layout_; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>; /// Shape of the tile in memory using Shape = 
MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< typename Operator::ElementC, Policy::kElementsPerIteration>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = Array< typename Operator::ElementC, Policy::kAccumulatorElementCount>; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; /// Padding quantity using Padding = MatrixShape< 0, 4 * Policy::kElementsPerAccess + 1 >; private: /// Storage type for accessing memory using AccessType = AlignedArray< Element, 1 >; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Guard to indicate whether the shape is divisible bool divisible_; /// Extent of the output tensor MatrixCoord extent_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorSimtCanonical(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorSimtCanonical( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / AccessType::kElements), divisible_(true), extent_(WarpShape::kM, WarpShape::kN) { auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id); thread_offset_ = { lane_offset.row() * Shape::kRow, lane_offset.column() * Policy::kElementsPerAccess }; pointer_ += layout_({ lane_offset.row() * Shape::kRow, lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements) }); } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorSimtCanonical( TensorRef const &ref, TensorCoord const &extent, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / AccessType::kElements), divisible_(false), extent_(extent) { auto lane_layout = Policy::MmaSimtPolicy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id); thread_offset_ = { lane_offset.row() * Shape::kRow, lane_offset.column() * Policy::kElementsPerAccess }; pointer_ += layout_({ lane_offset.row() * Shape::kRow, lane_offset.column() * Policy::kElementsPerAccess / int(AccessType::kElements) }); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorSimtCanonical & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / AccessType::kElements; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorSimtCanonical & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row(), tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += layout_({ coord_offset.row(), coord_offset.column() }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorSimtCanonical & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { // de-vectorized stores using ScalarAccessType = AlignedArray<Element, 1>; ScalarAccessType const *scalarFragPtr = reinterpret_cast<ScalarAccessType const *>(&frag); ScalarAccessType *scalarPointer = reinterpret_cast<ScalarAccessType *>(pointer_) + pointer_offset; CUTLASS_PRAGMA_UNROLL for (int n = 0; n < 
Policy::kAccessesPerIteration; ++n) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::kElementsPerAccess; s++) { int ptr_idx = n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s; int frag_idx = n * Policy::kElementsPerAccess + s; int col = thread_offset_.column() + ptr_idx; if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) { scalarPointer[ptr_idx] = scalarFragPtr[frag_idx]; } } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { // de-vectorized loads using ScalarAccessType = AlignedArray<Element, 1>; ScalarAccessType *scalarFragPtr = reinterpret_cast<ScalarAccessType *>(&frag); ScalarAccessType const *scalarPointer = reinterpret_cast<ScalarAccessType const*>(pointer_) + pointer_offset; CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::kAccessesPerIteration; ++n) { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < Policy::kElementsPerAccess; s++) { int ptr_idx = n * Policy::MmaSimtPolicy::WarpShape::kColumn * Policy::kElementsPerAccess + s; int frag_idx = n * Policy::kElementsPerAccess + s; int col = thread_offset_.column() + ptr_idx; if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) { scalarFragPtr[frag_idx] = scalarPointer[ptr_idx]; } } } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorSimtCanonical & operator++() { return add_tile_offset({1, 0}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
25658
C
31.645038
156
0.653519
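The scalar-store path in TileIteratorSimt above (enabled by CUTLASS_SIMT_EPILOGUE_USE_SCALAR_STORES) de-vectorizes each access and writes shared memory at offsets of the form n * MmaSimtPolicy::WarpShape::kColumn * kElementsPerAccess + s, while the tile is padded by 4 * kElementsPerAccess + 1 columns, the usual row-skewing trick for reducing shared-memory bank conflicts. Below is a minimal host-side sketch of that index arithmetic only; the policy values (kAccessesPerIteration = 4, kElementsPerAccess = 4, WarpShape::kColumn = 8) are illustrative assumptions, not values taken from a concrete instantiation of these templates.

#include <cstdio>

int main() {
  // Assumed illustrative policy values (not from a real TileIteratorSimt instantiation).
  const int kAccessesPerIteration = 4;
  const int kElementsPerAccess    = 4;
  const int kWarpShapeColumn      = 8;   // lanes along N in the assumed MmaSimtPolicy

  // Offsets one lane writes relative to its base pointer in the scalar-store path:
  //   scalarPointer[n * kWarpShapeColumn * kElementsPerAccess + s]
  for (int n = 0; n < kAccessesPerIteration; ++n) {
    for (int s = 0; s < kElementsPerAccess; ++s) {
      int ptr_idx = n * kWarpShapeColumn * kElementsPerAccess + s;
      std::printf("access n=%d, element s=%d -> scalar smem offset %d\n", n, s, ptr_idx);
    }
  }
  return 0;
}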
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/epilogue/warp/tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // This is an optimization available on CUDA 11.2 and beyond that eliminates branches in the epilogue. #define CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED ((__CUDACC_VER_MAJOR__ * 10 + __CUDACC_VER_MINOR__) >= 112) ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory. This is optimized /// for mixed-precision epilogues in which the accumulators are 32b in width, but the output /// data type is smaller. 
template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element_, ///< data type of accumulator element int ElementSizeBits, ///< Size of accumulator element in bits int OutputSizeBits, ///< Size of output element in bits int OutputElementCount, ///< number of elements in output vector int ContiguousLanes ///< Number of consecutive lanes writing to contiguous memory > class TileIteratorTensorOpMixed { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = layout::RowMajor; static int const kOutputElementCount = OutputElementCount; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. //using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; /// Number of pointers needed to write accumulators static int const kPointerCount = (OutputElementCount * sizeof_bits<Element>::value) / (const_min(128, OutputElementCount * sizeof_bits<Element>::value)); static_assert(kPointerCount <= 4, "Can only accommodate four pointers at present."); static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32)."); }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerAccess>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to memory AccessType *pointers_[Detail::kPointerCount]; /// Stride in units of AccessType int stride_; /// Logical column in which warp tile is aligned int warp_column_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed() { CUTLASS_PRAGMA_UNROLL for (int64_t i = 0; i < Detail::kPointerCount; ++i) { pointers_[i] = nullptr; } } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed( TensorRef const &ref, unsigned lane_id ): stride_(ref.stride()[0] / Policy::kElementsPerAccess), warp_column_(0) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); CUTLASS_PRAGMA_UNROLL for (int64_t i = 0; i < Detail::kPointerCount; ++i) { AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_; int column_idx = (lane_in_quad % 2) + (((lane_in_quad / 2) + i) % Detail::kPointerCount) * 2; ptr += column_idx; if (i == 0) { pointers_[0 % Detail::kPointerCount] = ptr; } else if (i == 1) { pointers_[1 % Detail::kPointerCount] = ptr; } else if (i == 2) { pointers_[2 % Detail::kPointerCount] = ptr; } else if (i == 3) { pointers_[3 % Detail::kPointerCount] = ptr; } } } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) { 
CUTLASS_PRAGMA_UNROLL for (int64_t i = 0; i < Detail::kPointerCount; ++i) { pointers_[i] += pointer_offset / Policy::kElementsPerAccess; } return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) { CUTLASS_PRAGMA_UNROLL for (int64_t i = 0; i < Detail::kPointerCount; ++i) { pointers_[i] += tile_offset.row() * Shape::kRow * stride_ + tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess; } warp_column_ += tile_offset.column() * Shape::kColumn; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) { return add_tile_offset(tile_offset); } /// Store CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType *ptr = pointers_[0]; #if CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED // When the optimization is enabled, small tiles require separate logic. bool kN32_optimization = (WarpShape::kN * Detail::kLanesInQuad * Policy::kElementsPerAccess * sizeof_bits<Element>::value) % 1024 == 0; if (kN32_optimization) { int ptr_idx = ((warp_column_ * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount; if (ptr_idx == 0) { ptr = pointers_[0]; } else if (ptr_idx == 1) { ptr = pointers_[1]; } else if (ptr_idx == 2) { ptr = pointers_[2]; } else if (ptr_idx == 3) { ptr = pointers_[3]; } } #endif CUTLASS_PRAGMA_UNROLL for (int64_t n = 0; n < Policy::OperatorCount::kColumn; ++n) { #if CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED // // When the optimization is enabled, this expression suffices to obtain the SMEM pointer. 
// if (WarpShape::kN == 64) { ptr = pointers_[n / 4]; } else if (!kN32_optimization) #endif { // This is the reference implementation int column_idx = warp_column_ + n * Detail::kLanesInQuad * Policy::kElementsPerAccess; int ptr_idx = ((column_idx * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount; if (ptr_idx == 0) { ptr = pointers_[0 % Detail::kPointerCount]; } else if (ptr_idx == 1) { ptr = pointers_[1 % Detail::kPointerCount]; } else if (ptr_idx == 2) { ptr = pointers_[2 % Detail::kPointerCount]; } else if (ptr_idx == 3) { ptr = pointers_[3 % Detail::kPointerCount]; } } int offset = n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess; ptr[offset] = frag_ptr[n]; } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int64_t n = 0; n < Policy::OperatorCount::kColumn; ++n) { int column_idx = warp_column_ + n * Detail::kLanesInQuad * Policy::kElementsPerAccess; int ptr_idx = ((column_idx * sizeof_bits<Element>::value) / 1024) % Detail::kPointerCount; AccessType const *smem_ptr = pointers_[ptr_idx]; frag_ptr[n] = smem_ptr[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess]; } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for int32_t x 16 => int8_t/int4b_t x 16 template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape), int OutputSizeBits ///< Size of output element in bits > class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, int32_t, 32, OutputSizeBits, 16, 8> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = int32_t; using Layout = layout::RowMajor; static int const kOutputElementCount = 16; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; /// Number of pointers needed to write accumulators static int const kPointerCount = 2; /// Offsets added static int const kOffsetCount = 4; static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32)."); }; /// Padding quantity using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, 2>; // // Data members // /// Internal pointer to memory AccessType *pointers_[Detail::kPointerCount]; /// Stride in units of AccessType int stride_; /// Uniform offset in bytes added to warp tile iterator int uniform_offset_[Detail::kOffsetCount]; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed() { CUTLASS_PRAGMA_UNROLL for (int64_t i = 0; i < Detail::kPointerCount; ++i) { pointers_[i] = nullptr; } } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed( TensorRef const &ref, unsigned lane_id ): stride_(ref.stride()[0] / AccessType::kElements) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Detail::kPointerCount; ++i) { AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_; int column_idx = lane_in_quad ^ (i * 2); ptr += column_idx; if (i == 0) { pointers_[0] = ptr; } else if (i == 1) { pointers_[1] = ptr; } } CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Detail::kOffsetCount; ++i) { uniform_offset_[i] = (i ^ 0) * 4 * sizeof(AccessType); } } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) { CUTLASS_PRAGMA_UNROLL for (int64_t i = 0; i < Detail::kPointerCount; ++i) { pointers_[i] += pointer_offset / AccessType::kElements; } return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) { int ptr_offset = tile_offset.row() * Shape::kRow * stride_ + tile_offset.column() * Shape::kColumn / AccessType::kElements; pointers_[0] += ptr_offset; pointers_[1] += ptr_offset; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Detail::kOffsetCount; ++i) { uniform_offset_[i] = (i ^ tile_offset.column()) * 4 * sizeof(AccessType); } return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) { return add_tile_offset(tile_offset); } /// Store CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { int ptr_idx = (n / 4); int offset_idx = (n % 4); AccessType *ptr; if (ptr_idx == 0) { ptr = pointers_[0]; } else if (ptr_idx == 1) { ptr = pointers_[1]; } int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements; #if 0 // // Using inline PTX to avoid generic memory // AccessType *smem_ptr = pointers_[ptr_idx]; smem_ptr[offset] = frag_ptr[n]; #else uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr); uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n); uint32_t 
offset_in_bytes = offset * sizeof(AccessType) + uniform_offset_[offset_idx]; asm volatile( "{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n" : : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1]) ); #endif } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for int32_t x 8 => int8_t/int4b_t x 8 template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) int OutputSizeBits ///< Size of output element in bits > class TileIteratorTensorOpMixed<WarpShape_, OperatorShape_, int32_t, 32, OutputSizeBits, 8, 8> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = int32_t; using Layout = layout::RowMajor; static int const kOutputElementCount = 8; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. //using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; /// Number of pointers needed to write accumulators static int const kPointerCount = 2; static_assert(sizeof(Element) == 4, "This can only be used with 32b accumulator data types (f32, s32)."); }; /// Padding quantity using Padding = MatrixShape<0, Detail::kLanesInQuad * 2>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, 2>; // // Data members // /// Internal pointer to memory AccessType *pointers_[Detail::kPointerCount]; /// Stride in units of AccessType int stride_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed() { CUTLASS_PRAGMA_UNROLL for (int64_t i = 0; i < Detail::kPointerCount; ++i) { pointers_[i] = nullptr; } } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed( TensorRef const &ref, unsigned lane_id ): stride_(ref.stride()[0] / AccessType::kElements) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Detail::kPointerCount; ++i) { AccessType *ptr = reinterpret_cast<AccessType *>(ref.data()) + quad_id * stride_; int column_idx = lane_in_quad ^ (i * 2); ptr += column_idx; if (i == 0) { pointers_[0] = ptr; } else if (i == 1) { pointers_[1] = ptr; } } } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & add_pointer_offset(Index pointer_offset) { CUTLASS_PRAGMA_UNROLL for (int64_t i = 0; i < Detail::kPointerCount; ++i) { pointers_[i] += pointer_offset / AccessType::kElements; } return *this; } ///< advances in units of 
whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & add_tile_offset(TensorCoord const &tile_offset) { int ptr_offset = tile_offset.row() * Shape::kRow * stride_ + tile_offset.column() * Shape::kColumn / AccessType::kElements; pointers_[0] += ptr_offset; pointers_[1] += ptr_offset; if (tile_offset.column() % 2) { auto tmp = pointers_[0]; pointers_[0] = pointers_[1]; pointers_[1] = tmp; } return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpMixed & operator+=(TensorCoord const &tile_offset) { return add_tile_offset(tile_offset); } /// Store CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { int ptr_idx = (n / 4); AccessType *ptr; if (ptr_idx == 0) { ptr = pointers_[0]; } else if (ptr_idx == 1) { ptr = pointers_[1]; } int offset = (n / 4) * 16 + pointer_offset / AccessType::kElements + (n % 4) * 4; #if 0 // // Using inline PTX to avoid generic memory // AccessType *smem_ptr = pointers_[ptr_idx]; smem_ptr[offset] = frag_ptr[n]; #else uint32_t smem_addr = arch::cutlass_get_smem_pointer(ptr); uint32_t const *data = reinterpret_cast<uint32_t const *>(frag_ptr + n); uint32_t offset_in_bytes = offset * sizeof(AccessType); asm volatile( "{ .reg .u32 smem_ptr; add.u32 smem_ptr, %0, %1; st.shared.v2.u32 [smem_ptr], {%2, %3}; }\n" : : "r"(smem_addr), "r"(offset_in_bytes), "r"(data[0]), "r"(data[1]) ); #endif } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////// #undef CUTLASS_EPILOGUE_WARP_TILE_ITERATOR_TENSOR_OP_MIXED_OPTIMIZATION_ENABLED /////////////////////////////////////////////////////////////////////////////////////////////////
22857
C
30.398352
140
0.626241
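TileIteratorTensorOpMixed above maintains Detail::kPointerCount shared-memory pointers, computed as (OutputElementCount * sizeof_bits<Element>::value) / const_min(128, OutputElementCount * sizeof_bits<Element>::value), so that 32-bit accumulators destined for a narrower output type can be staged without bank conflicts. A small host-side sketch of just that formula follows; the (count, bits) combinations printed are illustrative assumptions, not an exhaustive list of supported configurations.

#include <algorithm>
#include <cstdio>

// Mirrors Detail::kPointerCount from the iterator above:
//   kPointerCount = total_bits / min(128, total_bits), with total_bits = OutputElementCount * sizeof_bits<Element>
int pointer_count(int output_element_count, int element_bits) {
  int total_bits = output_element_count * element_bits;
  return total_bits / std::min(128, total_bits);
}

int main() {
  std::printf("4  x 32b accumulators -> %d pointer(s)\n", pointer_count(4, 32));   // 1
  std::printf("8  x 32b accumulators -> %d pointer(s)\n", pointer_count(8, 32));   // 2
  std::printf("16 x 32b accumulators -> %d pointer(s)\n", pointer_count(16, 32));  // 4
  return 0;
}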
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_gaussian_complex_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile that participate in one warp-level store operation. Typically, the accumulator tile is the largest single block of register-backed storage within the kernel. Storing it to memory is best accomplished by partitioning it into smaller tiles and storing these sequentially. Round trips through shared memory during the Epilogue phase require partitioning, as shared memory capacity is typically insufficient for a threadblock's total accumulator size. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/epilogue/warp/tensor_op_policy.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { //////////////////////////////////////////////////////////////////////////////// /// template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorElementC, ///< matrix multiply operation data type (concept: data type) typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: Array) typename Layout ///< target shared memory layout > class FragmentIteratorGaussianComplexTensorOp; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_, ///< shape of the warp-level GEMM tile typename OperatorShape_, ///< underlying real-valued matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorElementC_, ///< underlying real-valued matrix multiply operation data type typename OperatorFragmentC_ ///< underlying real-valued matrix multiply operation fragment (concept: Array) > class FragmentIteratorGaussianComplexTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using OperatorElementC = OperatorElementC_; using OperatorFragmentC = OperatorFragmentC_; using Layout = layout::RowMajor; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// This is the fragment size produced by one access of the iterator. 
using Fragment = Array< complex<OperatorElementC>, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// Size of one part of accumulator of 3-part accumulator in units of number of OperatorElementC static int const kElementsAccumulatorPerPart = OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn; /// Offset into the accumulator fragment part 1 static int const kPart1Index = kElementsAccumulatorPerPart * 0; /// Offset into the accumulator fragment part 2 static int const kPart2Index = kElementsAccumulatorPerPart * 1; /// Offset into the accumulator fragment part 3 static int const kPart3Index = kElementsAccumulatorPerPart * 2; /// This is the complete warp-level accumulator tile holding part1, part2, and part3 using AccumulatorTile = Array<OperatorElementC, kElementsAccumulatorPerPart * 3>; /// This is the complete warp-level accumulator tile holding final output of complex<T> type using OutputAccumulatorTile = Array<complex<OperatorElementC>, kElementsAccumulatorPerPart>; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; private: /// Internal access type using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>; using FragmentAccessType = Array<complex<OperatorElementC>, Policy::kElementsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorGaussianComplexTensorOp(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorGaussianComplexTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorGaussianComplexTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { int index = index_ + index_offset; FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { int accumulator_access_offset = index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess; auto const & part1_accum_array = accumulators_[accumulator_access_offset + kPart1Index]; auto const & part2_accum_array = accumulators_[accumulator_access_offset + kPart2Index / Policy::kElementsPerAccess]; auto const & part3_accum_array = accumulators_[accumulator_access_offset + kPart3Index / Policy::kElementsPerAccess]; // Pack parts 1, 2, and 3 into a structure. This is likely to result in MOVs CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Policy::kElementsPerAccess; ++i) { frag_ptr[n][i].real() = part1_accum_array[i] - part3_accum_array[i]; frag_ptr[n][i].imag() = part1_accum_array[i] + part2_accum_array[i]; } } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
7736
C
38.676923
132
0.683557
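FragmentIteratorGaussianComplexTensorOp::load above reconstructs each complex accumulator from a three-part real accumulator as real = part1 - part3 and imag = part1 + part2. This matches Gauss's three-multiplication complex product if part1, part2, and part3 hold k1 = c(a+b), k2 = a(d-c), and k3 = b(c+d) for the product (a+bi)(c+di); the three products themselves are formed by the Gaussian-complex MMA upstream of this file, so that correspondence is stated here as an assumption. A scalar host-side check of the identity:

#include <cassert>
#include <cmath>
#include <complex>
#include <cstdio>

int main() {
  // One complex product (a+bi)(c+di) via Gauss's three real multiplies.
  double a = 1.5, b = -2.0, c = 0.5, d = 3.0;

  double part1 = (a + b) * c;   // assumed contents of the part-1 accumulator
  double part2 = a * (d - c);   // assumed contents of the part-2 accumulator
  double part3 = b * (c + d);   // assumed contents of the part-3 accumulator

  // Reconstruction used by the fragment iterator: real = part1 - part3, imag = part1 + part2.
  std::complex<double> z(part1 - part3, part1 + part2);

  std::complex<double> ref = std::complex<double>(a, b) * std::complex<double>(c, d);
  std::printf("gauss = (%f, %f), reference = (%f, %f)\n", z.real(), z.imag(), ref.real(), ref.imag());
  assert(std::abs(z.real() - ref.real()) < 1e-12);
  assert(std::abs(z.imag() - ref.imag()) < 1e-12);
  return 0;
}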
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile that participate in one warp-level store operation. Typically, the accumulator tile is the largest single block of register-backed storage within the kernel. Storing it to memory is best accomplished by partitioning it into smaller tiles and storing these sequentially. Round trips through shared memory during the Epilogue phase require partitioning, as shared memory capacity is typically insufficient for a threadblock's total accumulator size. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/epilogue/warp/tensor_op_policy.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { //////////////////////////////////////////////////////////////////////////////// /// template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorElementC, ///< matrix multiply operation data type (concept: data type) typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: Array) typename Layout ///< target shared memory layout > class FragmentIteratorTensorOp; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_, ///< shape of the warp-level GEMM tile typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorElementC_, ///< matrix multiply operation data type (concept: data type) typename OperatorFragmentC_ ///< matrix multiply operation fragment (concept: Array) > class FragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using OperatorElementC = OperatorElementC_; using OperatorFragmentC = OperatorFragmentC_; using Layout = layout::RowMajor; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< OperatorElementC, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. 
using AccumulatorTile = Array< OperatorElementC, OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn>; using OutputAccumulatorTile = AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; using TileIterations = typename Policy::TileIterations; static int const kIterationsPerTile = kIterations / TileIterations::kCount; private: /// Internal access type using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorTensorOp(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { int index = index_ + index_offset; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { int accumulator_access_offset = index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess; frag_ptr[n] = accumulators_[accumulator_access_offset]; } } }; //////////////////////////////////////////////////////////////////////////////// /// Dedicated to interleaved layout template < /// shape of the warp-level GEMM tile typename WarpShape_, /// matrix multiply operator shape (concept: gemm::GemmShape) typename OperatorShape_, /// matrix multiply operator data type (concept: data type) typename OperatorElementC_, /// matrix multiply operator fragment (concept: Array) typename OperatorFragmentC_, /// number of interleaved k int InterleavedK> class FragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::ColumnMajorInterleaved<InterleavedK>> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using OperatorElementC = OperatorElementC_; using OperatorFragmentC = OperatorFragmentC_; static int const kInterleavedK = InterleavedK; using Layout = layout::ColumnMajorInterleaved<kInterleavedK>; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array<OperatorElementC, Policy::kElementsPerAccess * InterleavedK / OperatorShape::kN>; /// This is the complete warp-level accumulator tile. 
using AccumulatorTile = Array<OperatorElementC, OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn>; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; using TileIterations = typename Policy::TileIterations; static int const kIterationsPerTile = kIterations / TileIterations::kCount; private: /// Internal access type using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorTensorOp(AccumulatorTile const &accum) : accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) {} /// Increments CUTLASS_HOST_DEVICE FragmentIteratorTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { int index = index_ + index_offset; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < (InterleavedK / OperatorShape::kN); ++n) { int index_m = index % (Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction); int index_n = index / (Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction); int accumulator_access_offset = (index_m / Policy::kIterationsPerInstruction) * (Policy::OperatorCount::kColumn * Policy::kIterationsPerInstruction) + (index_m % Policy::kIterationsPerInstruction) + index_n * (InterleavedK / OperatorShape::kN) * Policy::kIterationsPerInstruction + n * Policy::kIterationsPerInstruction; frag_ptr[n] = accumulators_[accumulator_access_offset]; } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
9883
C
34.553957
117
0.663058
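In the row-major specialization of FragmentIteratorTensorOp above, each call to load gathers Policy::OperatorCount::kColumn accesses at offsets index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess, so successive increments of the iterator walk through the accumulator tile one row of accesses at a time. A host-side sketch of that offset arithmetic follows; the four constants are illustrative assumptions for the printout, not the values of any particular WarpShape/OperatorShape instantiation.

#include <cstdio>

int main() {
  // Assumed illustrative policy values.
  const int kIterations              = 4;
  const int kOperatorCountColumn     = 2;
  const int kElementsPerAccess       = 2;
  const int kAccumulatorColumnStride = 8;   // in elements

  for (int index = 0; index < kIterations; ++index) {
    std::printf("iteration %d reads accumulator accesses:", index);
    for (int n = 0; n < kOperatorCountColumn; ++n) {
      int access = index + n * kAccumulatorColumnStride / kElementsPerAccess;
      std::printf(" %d", access);
    }
    std::printf("\n");
  }
  return 0;
}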
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile that participate in one warp-level store operation. Typically, the accumulator tile is the largest single block of register-backed storage within the kernel. Storing it to memory is best accomplished by partitioning it into smaller tiles and storing these sequentially. Round trips through shared memory during the Epilogue phase require partitioning, as shared memory capacity is typically insufficient for a threadblock's total accumulator size. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/warp/volta_tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape) typename ElementC, ///< Accumulator layout typename Layout ///< target shared memory layout > class FragmentIteratorVoltaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > class FragmentIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> { public: using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = half_t; using Layout = layout::RowMajor; /// Policy operator using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, ElementC, Layout>; /// Array type for aligned memory accesses using AccessType = typename Policy::AccessType; /// This is the fragment size produced by one access of the iterator. using Fragment = typename Policy::Fragment; /// This is the complete warp-level accumulator tile. using AccumulatorTile = typename Policy::AccumulatorTile; using OutputAccumulatorTile = AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; private: private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); static int const kAccessesPerMma = Policy::kElementsPerMma / Policy::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { int tile_access_idx = (tile_n * Policy::TileIterations::kRow + (index_ & 2) / 2) * Policy::MmaIterations::kCount * kAccessesPerMma; CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn * kAccessesPerMma; ++mma_n) { int mma_access_idx = ((mma_n & 1) * 2 + (index_ & 1)) * kAccessesPerMma + (mma_n & 2) / 2; frag_ptr[tile_n * Policy::MmaIterations::kColumn * kAccessesPerMma + mma_n] = accumulators_[tile_access_idx + mma_access_idx]; } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > class FragmentIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> 
{ public: using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = float; using Layout = layout::RowMajor; /// Policy operator using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, ElementC, Layout>; /// Array type for aligned memory accesses using AccessType = typename Policy::AccessType; /// This is the fragment size produced by one access of the iterator. using Fragment = typename Policy::Fragment; /// This is the complete warp-level accumulator tile. using AccumulatorTile = typename Policy::AccumulatorTile; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; private: private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorVoltaTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); int const kRegsPerMmaRow = 2; CUTLASS_PRAGMA_UNROLL for (int reg_row = 0; reg_row < Policy::kRowsPerMmaTile; ++reg_row) { CUTLASS_PRAGMA_UNROLL for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn * 2; ++mma_n) { int mma_idx = (index_ & 1) + (index_ & 2) * Policy::MmaIterations::kCount / 2 + (tile_n * Policy::TileIterations::kRow) * Policy::MmaIterations::kCount + (mma_n & 1) * 2; int reg_offset = reg_row * kRegsPerMmaRow + (mma_n & 2) * 2; int reg_idx = mma_idx * Policy::kElementsPerMma + reg_offset; *frag_ptr = accumulators_[reg_idx / Policy::kElementsPerAccess]; ++frag_ptr; } } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
8,924
C
32.055555
117
0.639623
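A hedged usage sketch for the fragment iterator defined in the file above: the warp shape, the include path, and the kernel context are assumptions, but the constructor, operator++, load(), and kIterations follow directly from the class definition. The idea is to drain the warp's accumulator tile one fragment at a time, typically handing each fragment to a tile iterator that stages it in shared memory.

// Illustrative sketch, not part of the original file. Assumes a 64x64 warp tile and the
// conventional include path for this header.
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h"

using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
    cutlass::gemm::GemmShape<64, 64, 4>,   // warp-level GEMM tile (assumed)
    cutlass::gemm::GemmShape<32, 32, 4>,   // Volta interleaved instruction tile
    float,                                 // accumulator element type
    cutlass::layout::RowMajor>;            // target shared memory layout

__device__ void drain_accumulators(FragmentIterator::AccumulatorTile const &accum) {
  FragmentIterator frag_iterator(accum);

  CUTLASS_PRAGMA_UNROLL
  for (int iter = 0; iter < FragmentIterator::kIterations; ++iter, ++frag_iterator) {
    FragmentIterator::Fragment frag;
    frag_iterator.load(frag);   // gathers the accumulators visible in this iteration
    // ... hand 'frag' to a warp-level tile iterator that writes shared memory ...
  }
}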
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/fragment_iterator_wmma_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile that participate in one warp-level store operation. Typically, the accumulator tile is the largest single block of register-backed storage within the kernel. Storing it to memory is best accomplished by partitioning it into smaller tiles and storing these sequentially. Round trips through shared memory during the Epilogue phase require partitioning, as shared memory capacity is typically insufficient for a threadblock's total accumulator size. 
*/ #pragma once #if !(defined(__clang__) && defined(__CUDA__)) #include "cutlass/wmma_array.h" #include "cutlass/layout/matrix.h" #include "cutlass/epilogue/warp/wmma_tensor_op_policy.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { //////////////////////////////////////////////////////////////////////////////// /// template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorElementC, ///< matrix multiply operation data type (concept: data type) typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: nvcuda::cuda::fragment) typename Layout ///< target shared memory layout > class FragmentIteratorWmmaTensorOp; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major shared memory template < typename WarpShape_, ///< shape of the warp-level GEMM tile typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename OperatorElementC_, ///< matrix multiply operation data type (concept: data type) typename OperatorFragmentC_ ///< matrix multiply operation fragment (concept: nvcuda::cuda::fragment) > class FragmentIteratorWmmaTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using OperatorElementC = OperatorElementC_; using OperatorFragmentC = OperatorFragmentC_; using Layout = layout::RowMajor; using Policy = WmmaTensorOpPolicy<WarpShape, OperatorShape, Layout>; /// This is the fragment size produced by one access of the iterator. using Fragment = WmmaFragmentArray<OperatorFragmentC, Policy::OperatorCount::kColumn>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = WmmaFragmentArray<OperatorFragmentC, Policy::OperatorCount::kCount>; using OutputAccumulatorTile = AccumulatorTile; private: /// Internal access type using AccessType = WmmaFragmentArray<OperatorFragmentC, Policy::kWmmaFragmentsPerAccess>; private: // // Data members // /// Accumulator tile AccessType const *accumulators_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE FragmentIteratorWmmaTensorOp(AccumulatorTile const &accum): accumulators_(reinterpret_cast<AccessType const *>(&accum)), index_(0) { } /// Increments CUTLASS_HOST_DEVICE FragmentIteratorWmmaTensorOp &operator++() { ++index_; return *this; } /// Decrements CUTLASS_HOST_DEVICE FragmentIteratorWmmaTensorOp &operator--() { --index_; return *this; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset = 0) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for(int n=0; n < Policy::OperatorCount::kColumn; n++) { int accumulator_access_offset = index_ * Policy::OperatorCount::kColumn + n; frag_ptr[n] = accumulators_[accumulator_access_offset]; } } }; } // namespace warp } // namespace epilogue } // namespace cutlass //////////////////////////////////////////////////////////////////////////////// #else #error (defined(__clang__) && defined(__CUDA__)) #endif // !defined(__clang__)
6,045
C
35.642424
121
0.673284
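A minimal instantiation sketch for the WMMA fragment iterator above. The warp and operator shapes are assumptions; the operator fragment type is the nvcuda::wmma accumulator fragment the warp-level MMA produces, which requires <mma.h> and a device of compute capability 7.0 or newer.

// Illustrative sketch (assumed shapes): 64x64 warp tile built from 16x16x16 WMMA
// operations with float accumulators.
#include <mma.h>

#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/fragment_iterator_wmma_tensor_op.h"

using OperatorShape     = cutlass::gemm::GemmShape<16, 16, 16>;
using OperatorFragmentC = nvcuda::wmma::fragment<nvcuda::wmma::accumulator, 16, 16, 16, float>;

using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorWmmaTensorOp<
    cutlass::gemm::GemmShape<64, 64, 16>,  // warp-level GEMM tile (assumed)
    OperatorShape,                         // one nvcuda::wmma operation
    float,                                 // accumulator element type
    OperatorFragmentC,                     // accumulator fragment produced by the operation
    cutlass::layout::RowMajor>;

// Each load() yields Policy::OperatorCount::kColumn WMMA fragments, i.e. one row of
// operator tiles, which a WMMA tile iterator can then write out with store_matrix_sync.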
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/tensor_op_policy.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue. These quantities assume a 'column-major' arrangement of TensorOp instructions, of which a row-oriented slice is visible per iteration. 
*/ #pragma once #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { //////////////////////////////////////////////////////////////////////////////// /// Policy details related to the epilogue template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm:GemmShape) typename Layout ///< target shared memory layout > struct TensorOpPolicy; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape ///< matrix multiply operation shape (concept: gemm::GemmShape) > struct TensorOpPolicy<WarpShape, OperatorShape, layout::RowMajor> { /// Number of operations using OperatorCount = MatrixShape< (WarpShape::kM + OperatorShape::kM - 1) / OperatorShape::kM, (WarpShape::kN + OperatorShape::kN - 1) / OperatorShape::kN >; // // Hard-coded constants regarding Tensor Operations // static int const kElementsPerAccess = 2; static int const kRowsPerIteration = 8; static bool const kDivisible = !(WarpShape::kM % OperatorShape::kM) && !(WarpShape::kN % OperatorShape::kN); // // Derived quantities // // Number of 'externally visible' iterations per actual instruction static int const kIterationsPerInstruction = OperatorShape::kM / kRowsPerIteration; // Number of externally visible iterations static int const kIterations = OperatorCount::kRow * kIterationsPerInstruction; using TileIterations = MatrixShape<kIterations, 1>; static int const kAccumulatorRowStride = kElementsPerAccess; static int const kAccumulatorColumnStride = kElementsPerAccess * OperatorCount::kRow * kIterationsPerInstruction; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for column-major-interleaved template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation (concept: arch::Mma) int InterleavedK ///< number of interleaved k > struct TensorOpPolicy<WarpShape, OperatorShape, layout::ColumnMajorInterleaved<InterleavedK> > { /// Number of operations using OperatorCount = MatrixShape<WarpShape::kM / OperatorShape::kM, WarpShape::kN / OperatorShape::kN>; // // Hard-coded constants regarding Tensor Operations // static int const kElementsPerAccess = 2; static int const kRowsPerIteration = 8; // // Derived quantities // // Number of 'externally visible' iterations per actual instruction static int const kIterationsPerInstruction = OperatorShape::kM / kRowsPerIteration; // Number of externally visible iterations static int const kIterations = WarpShape::kN / InterleavedK * OperatorCount::kRow * kIterationsPerInstruction; static int const kElementsPerIteration = InterleavedK / OperatorShape::kN * kElementsPerAccess; static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess; // Number of externally visible iterations //static int const kTileIterations = OperatorCount::kRow * kIterationsPerInstruction; using TileIterations = MatrixShape<1, WarpShape::kN / InterleavedK>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass 
////////////////////////////////////////////////////////////////////////////////
5,979
C
39.134228
115
0.648938
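A worked example of the derived quantities in the row-major TensorOpPolicy above, under assumed warp and instruction shapes. With a 64x64 warp tile and a 16x8x8 Tensor Core instruction, each instruction spans two 8-row epilogue iterations, so the four row operators contribute eight iterations in total.

// Worked example (assumed shapes); the static_asserts restate the arithmetic.
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"

using Policy = cutlass::epilogue::warp::TensorOpPolicy<
    cutlass::gemm::GemmShape<64, 64, 8>,   // warp-level GEMM tile (assumed)
    cutlass::gemm::GemmShape<16, 8, 8>,    // one mma instruction (assumed)
    cutlass::layout::RowMajor>;

static_assert(Policy::OperatorCount::kRow == 4 && Policy::OperatorCount::kColumn == 8,
              "64/16 row operators and 64/8 column operators");
static_assert(Policy::kIterationsPerInstruction == 2,
              "16 rows per instruction / 8 rows per iteration");
static_assert(Policy::kIterations == 8,
              "4 row operators x 2 iterations per instruction");
static_assert(Policy::kAccumulatorColumnStride == 2 * 4 * 2,
              "kElementsPerAccess x OperatorCount::kRow x kIterationsPerInstruction");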
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/warp/simt_policy.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue. These quantities assume a 'column-major' arrangement of SimtOp instructions, of which a row-oriented slice is visible per iteration. 
*/ #pragma once #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename WarpShape, ///< shape of warp-level GEMM (concept: GemmShape) typename Operator, ///< matrix multiply operation (concept: arch::Mma) typename Layout, ///< destination layout in shared memory typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy) > struct SimtPolicy; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape_, ///< shape of warp-level GEMM (concept: MatrixShape) typename Operator_, ///< matrix multiply operation (concept: arch::Mma) typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy) > struct SimtPolicy<WarpShape_, Operator_, layout::RowMajor, MmaSimtPolicy_> { using WarpShape = WarpShape_; using Operator = Operator_; using MmaSimtPolicy = MmaSimtPolicy_; static_assert(!(WarpShape::kM % MmaSimtPolicy::WarpShape::kRow), "Divisibility"); static_assert(!(WarpShape::kN % MmaSimtPolicy::WarpShape::kColumn), "Divisibility"); /// Number of iterations static int const kIterations = WarpShape::kM / MmaSimtPolicy::WarpShape::kRow; /// Number of accumulators written per iteration static int const kElementsPerIteration = (WarpShape::kN / MmaSimtPolicy::WarpShape::kColumn); /// Total number of accumulators static int const kAccumulatorElementCount = kElementsPerIteration * kIterations; /// Number of consecutive elements static int const kElementsPerAccess = MmaSimtPolicy::LaneMmaShape::kN; /// Number of rows per epilogue iteration static int const kRowsPerIteration = MmaSimtPolicy::WarpShape::kRow; /// Number of accesses made in one iteration static int const kAccessesPerIteration = kElementsPerIteration / kElementsPerAccess; /// Number of elements in between accumulator chunks of (LaneMmaShape::kM x LaneMmaShape::kN) using Delta = MatrixShape< MmaSimtPolicy::WarpShape::kRow * MmaSimtPolicy::LaneMmaShape::kM, MmaSimtPolicy::WarpShape::kColumn * MmaSimtPolicy::LaneMmaShape::kN >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
4,864
C
44.046296
100
0.63898
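A worked example of the SimtPolicy quantities above, under an assumed lane arrangement. SimtPolicy only records the Operator type as a typedef, so a hypothetical placeholder stands in for the warp-level MmaSimt operator; the MmaSimtPolicy include path is also an assumption.

// Worked example (assumed shapes): 64x64 SIMT warp tile, 4x8 lanes, 4x4 accumulators per lane.
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/epilogue/warp/simt_policy.h"

struct WarpMmaSimtPlaceholder {};   // hypothetical stand-in for the warp-level operator

using LanePolicy = cutlass::gemm::warp::MmaSimtPolicy<
    cutlass::MatrixShape<4, 8>,                // 4x8 lane arrangement
    cutlass::layout::RowMajorInterleaved<1>,   // lane layout (not used by SimtPolicy)
    cutlass::gemm::GemmShape<4, 4, 1>>;        // 4x4 accumulator tile per lane

using Policy = cutlass::epilogue::warp::SimtPolicy<
    cutlass::gemm::GemmShape<64, 64, 8>, WarpMmaSimtPlaceholder,
    cutlass::layout::RowMajor, LanePolicy>;

static_assert(Policy::kIterations == 16, "64 rows / 4 lane rows");
static_assert(Policy::kElementsPerIteration == 8, "64 columns / 8 lane columns");
static_assert(Policy::kAccumulatorElementCount == 128, "8 elements x 16 iterations");
static_assert(Policy::kAccessesPerIteration == 2, "8 elements / 4 elements per access");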
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_relu.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with a maximum operation used by epilogues. */ #pragma once #include <cutlass/half.h> #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/scale_type.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { /// Single source of truth for whether to unroll for `LinearCombinationClamp()` constexpr bool LinearCombinationReluIsHeavy() { return false; } } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationRelu { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentScaleBias = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy(); /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute threshold; ///< minimum value that is output ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), threshold(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta = ElementCompute(0), ElementCompute threshold = ElementCompute(0) ): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr = nullptr, ElementCompute threshold = ElementCompute(0) ): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementCompute threshold_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationRelu(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); threshold_ = params.threshold; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // set to NaN to make ReLU no-op for all except last k partitions int64_t allones = -1; threshold_ = reinterpret_cast<ElementCompute const &>(allones); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Compute threshold optionally intermediate = relu(threshold_, intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Compute threshold optionally intermediate = relu(threshold_, intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes per-channel linear scaling and bias : D = scale * accumulator + bias /// Scale and Bias are from input Fragment CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentScaleBias const &scale, FragmentScaleBias const &bias) const { // Convert source to interal compute numeric type 
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform per-channel scale and bias FragmentCompute intermediate; multiply_add<FragmentCompute> mul_add_accumulator; if(Scale == ScaleType::OnlyAlphaPerChannelScaling) intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias else intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias ReLu<FragmentCompute> relu; // Compute threshold optionally intermediate = relu(threshold_, intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Conditional guards to enable partial specialization for packed integers #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2))) /// Applies a linear combination operator to an array of elements. /// /// D = alpha * accumulator + beta * source + uniform /// /// Special handling for int types template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ScaleType::Kind Scale, ///< Control Alpha and Beta scaling FloatRoundStyle Round > class LinearCombinationRelu <ElementOutput_, Count, int, float, Scale, Round> { public: using ElementOutput = ElementOutput_; using ElementAccumulator = int; using ElementCompute = float; static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy(); static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentScaleBias = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute threshold; ///< minimum value that is output ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), threshold(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta = ElementCompute(0), ElementCompute threshold = ElementCompute(0) ): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr = nullptr, ElementCompute threshold = ElementCompute(0) ): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementCompute threshold_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationRelu(Params const &params) { alpha_ = (params.alpha_ptr ? 
*params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta); threshold_ = params.threshold; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // set to NaN to make ReLU no-op for all except last k partitions int64_t allones = -1; threshold_ = reinterpret_cast<ElementCompute const &>(allones); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Compute threshold optionally intermediate = relu(threshold_, intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Compute threshold optionally intermediate = relu(threshold_, intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats 
back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } /// Computes per-channel linear scaling and bias : D = scale * accumulator + bias /// Scale and Bias are from input Fragment CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentScaleBias const &scale, FragmentScaleBias const &bias) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform per-channel scale and bias FragmentCompute intermediate; multiply_add<FragmentCompute> mul_add_accumulator; if(Scale == ScaleType::OnlyAlphaPerChannelScaling) intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias else intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias ReLu<FragmentCompute> relu; // Compute threshold optionally intermediate = relu(threshold_, intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } }; #endif // Conditional guards to enable partial specialization for packed integers ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
20,486
C
34.879159
150
0.673191
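A minimal sketch of using the functor above on a single fragment. The element types and fragment width are assumptions (half_t output with float accumulation and eight elements per access); in a real kernel the epilogue invokes the functor once per output fragment rather than from host code.

// Illustrative sketch: D = ReLU(alpha * accumulator + beta * source), clamped at 'threshold'.
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"

using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
    cutlass::half_t,   // ElementOutput
    8,                 // elements per operation: 128 bits / 16 bits per half
    float,             // ElementAccumulator
    float>;            // ElementCompute

void apply_one_fragment(EpilogueOp::FragmentAccumulator const &accum,
                        EpilogueOp::FragmentOutput const &source,
                        EpilogueOp::FragmentOutput &output) {
  // alpha scales the accumulator, beta scales the source tensor, and any element that
  // falls below 'threshold' is clamped up to it by the ReLU.
  EpilogueOp::Params params(/*alpha=*/1.0f, /*beta=*/0.5f, /*threshold=*/0.0f);
  EpilogueOp op(params);

  if (op.is_source_needed()) {
    output = op(accum, source);   // D = ReLU(alpha * accum + beta * source)
  } else {
    output = op(accum);           // D = ReLU(alpha * accum)
  }
}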
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_residual_block.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue functor specialized for residual blocks in deep neural networks. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { namespace detail { /// Dummy class used to designate that the second binary operator in the epilogue is unsued template <typename T> class NoOp {}; } /// Models a residual block of the form: UnaryOp(BinaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual1), residual2)) template <typename ElementOutput_, typename ElementAccumulator_, typename ElementCompute_, typename ElementC_, int ElementsPerAccess, template <typename T> class ActivationOp_, template <typename T> class BinaryOp1_, template <typename T> class UnaryOp_, template <typename T> class BinaryOp2_ = detail::NoOp> class LinearCombinationResidualBlock { public: static bool const kIsSingleSource = false; using ElementOutput = ElementC_; using ElementC = ElementC_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kElementsPerAccess = ElementsPerAccess; static int const kCount = kElementsPerAccess; using UnaryOp = UnaryOp_<Array<ElementCompute, kCount>>; using BinaryOp1 = BinaryOp1_<Array<ElementCompute, kCount>>; using BinaryOp2 = BinaryOp2_<Array<ElementCompute, kCount>>; using ActivationOp = ActivationOp_<Array<ElementCompute, kCount>>; using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>; using FragmentCompute = Array<ElementCompute, kElementsPerAccess>; using FragmentC = Array<ElementC, kElementsPerAccess>; using FragmentOutput = Array<ElementOutput, kElementsPerAccess>; using ElementZ = ElementOutput_; using ElementT = ElementZ; using FragmentZ = Array<ElementZ, kElementsPerAccess>; using FragmentT = Array<ElementT, kElementsPerAccess>; static bool const kIsHeavy = true; static bool const kStoreZ = true; static bool const kStoreT = false; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales residual input ElementCompute const *alpha_ptr{nullptr}; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr{nullptr}; ///< pointer to residual scalar - if not null, loads it from memory CUTLASS_HOST_DEVICE Params() : alpha(ElementCompute(1)), beta(ElementCompute(1)) {} CUTLASS_HOST_DEVICE Params(ElementCompute alpha, ElementCompute beta) : alpha(alpha), beta(beta) {} CUTLASS_HOST_DEVICE Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr) : alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {} }; private: ElementCompute alpha_; ElementCompute beta_; bool skip_elementwise_; public: /// Constructor from Params CUTLASS_HOST_DEVICE LinearCombinationResidualBlock(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta); skip_elementwise_ = false; } /// The "source" tensor corresponds to the residual input CUTLASS_HOST_DEVICE bool is_source_needed() const { return true; } /// Functionally required for serial reduction in the epilogue /// IMPORTANT: Split-k is supported only when ActivationOp is Identity. 
CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; } } /// Applies the operation UnaryOp(BinaryOp(BinaryOp(ActivationOp(AB + bias), residual1), residual2)) CUTLASS_HOST_DEVICE void operator()(FragmentOutput &frag_Z, FragmentOutput &, FragmentAccumulator const &AB, FragmentC const &residual1, FragmentC const &residual2, FragmentCompute const &bias) const { UnaryOp unary_op; BinaryOp1 binary_op1; BinaryOp2 binary_op2; ActivationOp activation; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute tmp_residual1 = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual1); FragmentCompute tmp_residual2 = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual2); FragmentCompute z = binary_op2(binary_op1(activation(alpha_ * tmp_Accum + bias), beta_ * tmp_residual1), beta_ * tmp_residual2); FragmentCompute result_Z = skip_elementwise_ ? z : unary_op(z); NumericArrayConverter<ElementOutput, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); } /// Should never be called CUTLASS_HOST_DEVICE void operator()(FragmentOutput &, FragmentOutput &, FragmentAccumulator const &, FragmentCompute const &) const {} }; /// Models a residual block of the form: UnaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual)) template <typename ElementOutput_, typename ElementAccumulator_, typename ElementCompute_, typename ElementC_, int ElementsPerAccess, template <typename T> class ActivationOp_, template <typename T> class BinaryOp1_, template <typename T> class UnaryOp_> class LinearCombinationResidualBlock<ElementOutput_, ElementAccumulator_, ElementCompute_, ElementC_, ElementsPerAccess, ActivationOp_, BinaryOp1_, UnaryOp_, detail::NoOp> { public: static bool const kIsSingleSource = true; using ElementOutput = ElementC_; using ElementC = ElementC_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kElementsPerAccess = ElementsPerAccess; static int const kCount = kElementsPerAccess; using UnaryOp = UnaryOp_<Array<ElementCompute, kCount>>; using BinaryOp = BinaryOp1_<Array<ElementCompute, kCount>>; using ActivationOp = ActivationOp_<Array<ElementCompute, kCount>>; using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>; using FragmentCompute = Array<ElementCompute, kElementsPerAccess>; using FragmentC = Array<ElementC, kElementsPerAccess>; using FragmentOutput = Array<ElementOutput, kElementsPerAccess>; using ElementZ = ElementOutput_; using ElementT = ElementZ; using FragmentZ = Array<ElementZ, kElementsPerAccess>; using FragmentT = Array<ElementT, kElementsPerAccess>; static bool const kIsHeavy = true; static bool const kStoreZ = true; static bool const kStoreT = false; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales residual input ElementCompute const *alpha_ptr{nullptr}; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr{nullptr}; ///< pointer to residual scalar - if not null, loads it from memory CUTLASS_HOST_DEVICE Params() : alpha(ElementCompute(1)), beta(ElementCompute(1)) {} CUTLASS_HOST_DEVICE Params(ElementCompute alpha, ElementCompute beta) : alpha(alpha), beta(beta) {} CUTLASS_HOST_DEVICE 
Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr) : alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {} }; private: ElementCompute alpha_; ElementCompute beta_; bool skip_elementwise_; public: /// Constructor from Params CUTLASS_HOST_DEVICE LinearCombinationResidualBlock(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta); skip_elementwise_ = false; } /// The "source" tensor corresponds to the residual input CUTLASS_HOST_DEVICE bool is_source_needed() const { return true; } /// Functionally required for serial reduction in the epilogue /// IMPORTANT: Split-k is supported only when ActivationOp is Identity. CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; } } /// Applies the operation UnaryOp(BinaryOp(ActivationOp(AB + bias), residual)) CUTLASS_HOST_DEVICE void operator()(FragmentOutput &frag_Z, FragmentOutput &, FragmentAccumulator const &AB, FragmentC const &residual, FragmentCompute const &bias) const { UnaryOp unary_op; BinaryOp binary_op; ActivationOp activation; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute tmp_residual = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(residual); FragmentCompute z = binary_op(activation(alpha_ * tmp_Accum + bias), beta_ * tmp_residual); FragmentCompute result_Z = skip_elementwise_ ? z : unary_op(z); NumericArrayConverter<ElementOutput, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); } /// Should never be called CUTLASS_HOST_DEVICE void operator()(FragmentOutput &, FragmentOutput &, FragmentAccumulator const &, FragmentCompute const &) const {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
11,855
C
38.128713
124
0.690257
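A hedged instantiation sketch for the single-source residual epilogue above. The element types and operator choices are assumptions; with BinaryOp2 left at its NoOp default, the single-residual specialization is selected, and the functor computes Identity(plus(ReLU(alpha * AB + bias), beta * residual)), i.e. activation before the residual add and no trailing unary transform.

// Illustrative sketch (assumed types and operators).
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"

using ResidualEpilogue = cutlass::epilogue::thread::LinearCombinationResidualBlock<
    cutlass::half_t,                        // ElementOutput (Z)
    float,                                  // ElementAccumulator
    float,                                  // ElementCompute
    cutlass::half_t,                        // ElementC (residual input)
    8,                                      // elements per access
    cutlass::epilogue::thread::ReLu,        // ActivationOp applied to AB + bias
    cutlass::plus,                          // BinaryOp1 folding in the residual
    cutlass::epilogue::thread::Identity>;   // UnaryOp applied last

static_assert(ResidualEpilogue::kIsSingleSource,
              "leaving BinaryOp2 at its NoOp default selects the single-residual specialization");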
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_bias_elementwise.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations used by epilogues. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// This base class is meant to define the concept required of the /// EpilogueWithBroadcast::OutputOp template < typename ElementC_, typename ElementAccumulator_, typename ElementCompute_, typename ElementZ_, typename ElementT_, int ElementsPerAccess, typename ElementwiseOp_ = Identity<ElementCompute_>, typename BinaryOp_ = plus<ElementCompute_> > class LinearCombinationBiasElementwise { public: using ElementOutput = ElementC_; using ElementC = ElementC_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; using ElementZ = ElementZ_; using ElementT = ElementT_; static int const kElementsPerAccess = ElementsPerAccess; static int const kCount = kElementsPerAccess; using ElementwiseOp = ElementwiseOp_; using BinaryOp = BinaryOp_; // Indicates that this epilogue applies only one binary operation static bool const kIsSingleSource = true; using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>; using FragmentCompute = Array<ElementCompute, kElementsPerAccess>; using FragmentC = Array<ElementOutput, kElementsPerAccess>; using FragmentZ = Array<ElementZ, kElementsPerAccess>; using FragmentT = Array<ElementT, kElementsPerAccess>; using FragmentOutput = FragmentZ; static bool const kIsHeavy = ElementwiseOp::kIsHeavy; /// If true, the 'Z' tensor is stored static bool const kStoreZ = true; /// If true, the 'T' tensor is stored static bool const kStoreT = true; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha ): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; bool skip_elementwise_; public: // // Methods // /// Constructor from Params CUTLASS_HOST_DEVICE LinearCombinationBiasElementwise(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); skip_elementwise_ = false; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; } } /// Applies the operation when is_source_needed() is true CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentC const &frag_C, FragmentCompute const &V) const { ElementwiseOp elementwise_op; BinaryOp binary_op; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C); FragmentCompute result_Z; FragmentCompute result_T; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerAccess; ++i) { ElementCompute z = binary_op(alpha_ * tmp_Accum[i] + beta_ * tmp_C[i], V[i]); result_T[i] = z; result_Z[i] = skip_elementwise_ ? z : elementwise_op(z); } NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t; frag_T = convert_t(result_T); } /// Applies the operation when is_source_needed() is false CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentCompute const &V) const { ElementwiseOp elementwise_op; BinaryOp binary_op; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute result_Z; FragmentCompute result_T; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerAccess; ++i) { ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]); result_T[i] = z; result_Z[i] = skip_elementwise_ ? z : elementwise_op(z); } NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t; frag_T = convert_t(result_T); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
8,344
C
30.97318
116
0.657958
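A hedged sketch of a typical instantiation of the broadcast-bias functor above, as used with an EpilogueWithBroadcast-style epilogue: the binary op folds the broadcast vector V (e.g. a bias) into alpha * AB + beta * C, the pre-activation result is stored as T, and the elementwise op produces Z. The element types, fragment width, and the GELU_taylor choice are assumptions.

// Illustrative sketch (assumed types): Z = GELU(alpha * AB + beta * C + V), T = pre-activation.
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"

using BiasGeluOp = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
    cutlass::half_t,                                 // ElementC
    float,                                           // ElementAccumulator
    float,                                           // ElementCompute
    cutlass::half_t,                                 // ElementZ (activated output)
    cutlass::half_t,                                 // ElementT (pre-activation output)
    8,                                               // elements per access
    cutlass::epilogue::thread::GELU_taylor<float>,   // elementwise op producing Z
    cutlass::plus<float>>;                           // binary op folding in the bias V

static_assert(BiasGeluOp::kStoreZ && BiasGeluOp::kStoreT,
              "this functor writes both the activated tensor Z and the pre-activation tensor T");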
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_generic.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/scale_type.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator followed by an activation function to an array of elements. 
/// /// D = activation(alpha * accumulator + beta * source + uniform) /// template < template<typename T> class ActivationFunctor, typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest, bool IsHeavy = false > class LinearCombinationGeneric { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static bool const kIsHeavy = IsHeavy; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure using Params = typename ActivationFunctor<FragmentCompute>::Params; private: // // Data members // Params params_; bool skip_elementwise_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationGeneric(Params const &params) { params_ = params; params_.alpha = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); params_.beta = (params.beta_ptr ? *params.beta_ptr : params.beta); skip_elementwise_ = false; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return params_.beta != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { params_.beta = ElementCompute(1); } if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; ActivationFunctor<FragmentCompute> activation; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(params_.alpha, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(params_.beta, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(params_.alpha, converted_accumulator, intermediate); // D = alpha * Accum + X } 
intermediate = skip_elementwise_ ? intermediate : activation(intermediate, params_); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert accumulator to internal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_accumulator; ActivationFunctor<FragmentCompute> activation; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_accumulator(params_.alpha, converted_accumulator); // D = alpha * Accum } intermediate = skip_elementwise_ ? intermediate : activation(intermediate, params_); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
8,065
C
37.778846
129
0.673404
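LinearCombinationGeneric computes D = activation(alpha * accumulator + beta * source), with the ScaleType kind selecting how the combination is formed and skip_elementwise_ suppressing the activation for all but the final serial split-K partition. A rough per-element model in plain C++ (the Activation callable and ScaleKind enum are stand-ins, not CUTLASS definitions):

#include <functional>

enum class ScaleKind { Default, NoBetaScaling, Nothing };

// Per-element model of LinearCombinationGeneric:
//   D = activation(alpha * accumulator + beta * source)
// OnlyAlphaScaling is omitted: in that mode the source fragment is never loaded,
// so it behaves like Default with the beta term dropped.
float linear_combination_generic(float accumulator, float source,
                                 float alpha, float beta,
                                 ScaleKind scale, bool skip_elementwise,
                                 const std::function<float(float)>& activation) {
  float intermediate;
  if (scale == ScaleKind::NoBetaScaling) {
    intermediate = alpha * accumulator + source;        // source added without beta scaling
  } else if (scale == ScaleKind::Nothing) {
    intermediate = accumulator;                         // no scaling at all
  } else {
    intermediate = alpha * accumulator + beta * source; // Default
  }
  // Serial split-K: only the final partition applies the activation.
  return skip_elementwise ? intermediate : activation(intermediate);
}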
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_relu0.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with a relu operation used by epilogues. This one only supports relu0 and tries to folding relu into other instructions. Thus, serial splitk is not supported by this one. For example, relu can be folded into hfma2/hmul2 for sm80+ */ #pragma once #include <cutlass/half.h> #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/scale_type.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { /// Single source of truth for whether to unroll for `LinearCombinationClamp()` constexpr bool LinearCombinationRelu0IsHeavy() { return false; } } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationRelu0 { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentScaleBias = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; static bool const kIsHeavy = detail::LinearCombinationRelu0IsHeavy(); /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta = ElementCompute(0) ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr = nullptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationRelu0(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// This is used for serial reduction which is not supported by Relu0 CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { assert(k_partition == 0); } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add_relu0<FragmentCompute> mul_add_relu0_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_relu0_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; // Compute threshold optionally intermediate = relu(intermediate); } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_relu0_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Compute threshold optionally intermediate = relu(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes per-channel linear scaling and bias : D = scale * accumulator + bias /// Scale and Bias are from input Fragment CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentScaleBias const &scale, FragmentScaleBias const &bias) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform per-channel scale and bias FragmentCompute intermediate; multiply_add<FragmentCompute> mul_add_accumulator; if(Scale == 
ScaleType::OnlyAlphaPerChannelScaling) intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias else intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias ReLu<FragmentCompute> relu; // Compute threshold optionally intermediate = relu(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Conditional guards to enable partial specialization for packed integers #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2))) /// Applies a linear combination operator to an array of elements. /// /// D = alpha * accumulator + beta * source + uniform /// /// Special handling for int types template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ScaleType::Kind Scale, ///< Control Alpha and Beta scaling FloatRoundStyle Round > class LinearCombinationRelu0 <ElementOutput_, Count, int, float, Scale, Round> { public: using ElementOutput = ElementOutput_; using ElementAccumulator = int; using ElementCompute = float; static bool const kIsHeavy = detail::LinearCombinationRelu0IsHeavy(); static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentScaleBias = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta = ElementCompute(0) ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr = nullptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationRelu0(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// This is used for serial reduction which is not supported by Relu0 CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { assert(k_partition == 0); } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Compute threshold optionally intermediate = relu(intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; ReLu<FragmentCompute> relu; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Compute threshold optionally intermediate = relu(intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { 
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } /// Computes per-channel linear scaling and bias : D = scale * accumulator + bias /// Scale and Bias are from input Fragment CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentScaleBias const &scale, FragmentScaleBias const &bias) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform per-channel scale and bias FragmentCompute intermediate; multiply_add<FragmentCompute> mul_add_accumulator; if(Scale == ScaleType::OnlyAlphaPerChannelScaling) intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias else intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias ReLu<FragmentCompute> relu; // Compute threshold optionally intermediate = relu(intermediate); if (platform::numeric_limits<ElementOutput>::is_integer) { // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } else { NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } } }; #endif // Conditional guards to enable partial specialization for packed integers ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
19,348
C
34.699262
150
0.674075
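LinearCombinationRelu0 clamps at zero by folding the ReLU into the multiply-add (multiply_add_relu0, which can map to hfma2/hmul2 on SM80+), which is why serial split-K is not supported, and the int-accumulator specialization computes in float before converting back to the integer output type. A simplified scalar sketch of both paths (plain C++; the helper names and the int8 rounding/clamping are illustrative assumptions, not the CUTLASS converters):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Float path: D = max(alpha * accum + beta * source, 0); the real functor fuses
// the clamp into multiply_add_relu0 rather than issuing a separate max.
float relu0_epilogue_f32(float accum, float source, float alpha, float beta) {
  return std::max(alpha * accum + beta * source, 0.0f);
}

// Int-accumulator path: compute in float, apply the zero threshold, then round
// back to a narrow integer output (int8_t chosen here purely as an example).
std::int8_t relu0_epilogue_s8(int accum, std::int8_t source, float alpha, float beta) {
  float intermediate = alpha * static_cast<float>(accum) + beta * static_cast<float>(source);
  intermediate = std::max(intermediate, 0.0f);                    // ReLU at zero
  long rounded = std::lround(intermediate);                       // round to nearest
  rounded = std::min<long>(std::max<long>(rounded, -128), 127);   // clamp to int8 range
  return static_cast<std::int8_t>(rounded);
}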
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_sigmoid.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with Sigmoid operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/linear_combination_generic.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator followed by the Sigmoid activation, to an array of elements. /// /// D = sigmoid(alpha * accumulator + beta * source + uniform) /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > using LinearCombinationSigmoid = LinearCombinationGeneric<Sigmoid, ElementOutput_, Count, ElementAccumulator_, ElementCompute_, Scale, Round, true>; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
3,688
C
50.957746
129
0.609273
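linear_combination_sigmoid.h is only a type alias: LinearCombinationSigmoid instantiates LinearCombinationGeneric with Sigmoid as the activation and IsHeavy = true. Per element it evaluates the following (host-side sketch in plain C++):

#include <cmath>

// Per-element model of LinearCombinationSigmoid:
//   D = 1 / (1 + exp(-(alpha * accumulator + beta * source)))
float sigmoid_epilogue(float accumulator, float source, float alpha, float beta) {
  float z = alpha * accumulator + beta * source;
  return 1.0f / (1.0f + std::exp(-z));
}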
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_bias_relu.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations used by epilogues. 
*/ #pragma once #include <cuda_fp16.h> #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename Element, int ElementsPerAccess> struct ArrayMaximum { CUTLASS_HOST_DEVICE Array<Element, ElementsPerAccess> operator()( Array<Element, ElementsPerAccess> const &lhs, Array<Element, ElementsPerAccess> const &rhs) const { Array<Element, ElementsPerAccess> result; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ElementsPerAccess; ++i) { result[i] = fmax(lhs[i], rhs[i]); } return result; } }; template <int ElementsPerAccess> struct ArrayMaximum<half_t, ElementsPerAccess> { CUTLASS_DEVICE Array<half_t, ElementsPerAccess> operator()( Array<half_t, ElementsPerAccess> const &lhs, Array<half_t, ElementsPerAccess> const &rhs) const { Array<half_t, ElementsPerAccess> result; #if __CUDA_ARCH__ >= 800 int const kVectorCount = ElementsPerAccess / 2; __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(lhs.raw_data()); __half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(rhs.raw_data()); __half2 *res_ptr = reinterpret_cast<__half2 *>(result.raw_data()); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kVectorCount; ++i) { res_ptr[i] = __hmax2(lhs_ptr[i], rhs_ptr[i]); } #else __half const *lhs_ptr = reinterpret_cast<__half const *>(lhs.raw_data()); __half const *rhs_ptr = reinterpret_cast<__half const *>(rhs.raw_data()); __half *res_ptr = reinterpret_cast<__half *>(result.raw_data()); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ElementsPerAccess; ++i) { res_ptr[i] = ((lhs_ptr[i] < rhs_ptr[i]) ? rhs_ptr[i] : lhs_ptr[i]); } #endif return result; } CUTLASS_DEVICE Array<half_t, ElementsPerAccess> operator()( Array<half_t, ElementsPerAccess> const &lhs, half_t const &rhs) const { Array<half_t, ElementsPerAccess> result; #if __CUDA_ARCH__ >= 800 int const kVectorCount = ElementsPerAccess / 2; __half rhs_raw = reinterpret_cast<__half const &>(rhs); __half2 rhs_pair = __half2half2(rhs_raw); __half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(lhs.raw_data()); __half2 *res_ptr = reinterpret_cast<__half2 *>(result.raw_data()); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kVectorCount; ++i) { res_ptr[i] = __hmax2(lhs_ptr[i], rhs_pair); } #else __half const *lhs_ptr = reinterpret_cast<__half const *>(lhs.raw_data()); __half const rhs_raw = reinterpret_cast<__half const &>(rhs); __half *res_ptr = reinterpret_cast<__half *>(result.raw_data()); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ElementsPerAccess; ++i) { res_ptr[i] = ((lhs_ptr[i] < rhs_raw) ? 
rhs_raw : lhs_ptr[i]); } #endif return result; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, int ElementsPerAccess> struct ReluConditional { CUTLASS_HOST_DEVICE void operator()( bool conditional[], Array<Element, ElementsPerAccess> const &fragment, Element threshold) const { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ElementsPerAccess; ++i) { conditional[i] = !(fragment[i] < threshold); } } }; template <int ElementsPerAccess> struct ReluConditional<half_t, ElementsPerAccess> { CUTLASS_DEVICE void operator()( bool conditional[], Array<half_t, ElementsPerAccess> const &fragment, half_t threshold) const { __half y = reinterpret_cast<__half const &>(threshold); __half const *x = reinterpret_cast<__half const *>(fragment.raw_data()); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < ElementsPerAccess; ++i) { conditional[i] = !__hlt(x[i], y); } } }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// This is a partial specialization for fused Bias and ReLU. It supports the option of packing /// ReLU conditionals in a bit vector that may be used by backwards passes as an optimization. /// /// This class can only be used with cutlass::epilogue::threadblock::EpilogueWithBroadcast<>. /// /// This base class is meant to define the concept required of the /// EpilogueWithBroadcast::OutputOp template < typename ElementC_, typename ElementAccumulator_, typename ElementCompute_, typename ElementZ_, int ElementsPerAccess, bool StoreT = true > class LinearCombinationBiasRelu { public: using ElementOutput = ElementC_; using ElementC = ElementC_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; using ElementZ = ElementZ_; using ElementT = uint1b_t; static int const kElementsPerAccess = ElementsPerAccess; static int const kCount = kElementsPerAccess; using ElementwiseOp = ReLu<ElementCompute>; using BinaryOp = plus<ElementCompute>; // Indicates that this epilogue applies only one binary operation static bool const kIsSingleSource = true; using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>; using FragmentCompute = Array<ElementCompute, kElementsPerAccess>; using FragmentC = Array<ElementOutput, kElementsPerAccess>; using FragmentZ = Array<ElementZ, kElementsPerAccess>; using FragmentT = Array<ElementT, kElementsPerAccess>; /// If true, the 'Z' tensor is stored static bool const kStoreZ = true; /// If true, the 'T' tensor is stored static bool const kStoreT = StoreT; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory ElementZ threshold; ///< ReLu threshold // // Methods // // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute()), alpha_ptr(nullptr), beta_ptr(nullptr), threshold(ElementCompute()) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta, ElementCompute threshold_ = ElementCompute() ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { NumericConverter<ElementZ, ElementCompute> convert_threshold; threshold = convert_threshold(threshold_); } CUTLASS_HOST_DEVICE Params( ElementCompute alpha ): alpha(alpha), beta(0), 
alpha_ptr(nullptr), beta_ptr(nullptr), threshold(ElementZ()) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr, ElementCompute threshold_ = ElementCompute() ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { NumericConverter<ElementZ, ElementCompute> convert_threshold; threshold = convert_threshold(threshold_); } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr), threshold(ElementZ()) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementZ threshold_; public: // // Methods // /// Constructor from Params CUTLASS_HOST_DEVICE LinearCombinationBiasRelu(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta); threshold_ = params.threshold; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // set to NaN to make ReLU no-op for all except last k partitions int64_t allones = -1; threshold_ = reinterpret_cast<ElementZ const &>(allones); } } /// Applies the operation when is_source_needed() is true CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentC const &frag_C, FragmentCompute const &V) const { BinaryOp binary_op; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C); FragmentCompute result_Z; bool conditions[kElementsPerAccess]; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerAccess; ++i) { ElementCompute z = alpha_ * tmp_Accum[i]; z += beta_ * tmp_C[i]; z = binary_op(z, V[i]); result_Z[i] = z; } NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); // // Compute condition // detail::ReluConditional<ElementZ, kElementsPerAccess> relu_conditional; relu_conditional(conditions, frag_Z, threshold_); detail::ArrayMaximum<ElementZ, kElementsPerAccess> maximum_op; frag_Z = maximum_op(frag_Z, threshold_); if (kStoreT) { PackPredicates<kElementsPerAccess> pack_predicates; frag_T = pack_predicates(conditions); } } /// Applies the operation when is_source_needed() is false CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentCompute const &V) const { BinaryOp binary_op; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute result_Z; bool conditions[kElementsPerAccess]; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerAccess; ++i) { ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]); result_Z[i] = z; } NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); // // Compute condition // detail::ReluConditional<ElementZ, kElementsPerAccess> relu_conditional; relu_conditional(conditions, frag_Z, threshold_); detail::ArrayMaximum<ElementZ, kElementsPerAccess> maximum_op; frag_Z = maximum_op(frag_Z, threshold_); // // Compute conditions // // // Store // if (kStoreT) { 
PackPredicates<kElementsPerAccess> pack_predicates; frag_T = pack_predicates(conditions); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
13,490
C
28.913525
116
0.637064
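LinearCombinationBiasRelu computes Z = max(alpha * AB + beta * C + V, threshold) and, when kStoreT is enabled, packs the predicate !(z < threshold) for each element into a bit vector so a backward pass can reuse the ReLU mask. A compact host-side model of that flow, packing one uint32_t word per 32 elements (plain C++; the word size is an assumption rather than the PackPredicates layout):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Model of the fused bias + ReLU epilogue with predicate packing:
//   z[i]       = alpha * ab[i] + beta * c[i] + bias[i]
//   bit i of T = !(z[i] < threshold)     (kept for the backward pass)
//   Z[i]       = max(z[i], threshold)
void bias_relu_epilogue(const std::vector<float>& ab, const std::vector<float>& c,
                        const std::vector<float>& bias,
                        float alpha, float beta, float threshold,
                        std::vector<float>& frag_Z, std::vector<std::uint32_t>& frag_T) {
  frag_Z.resize(ab.size());
  frag_T.assign((ab.size() + 31) / 32, 0u);
  for (std::size_t i = 0; i < ab.size(); ++i) {
    float z = alpha * ab[i] + beta * c[i] + bias[i];
    if (!(z < threshold)) {                       // NaN-propagating form of the comparison
      frag_T[i / 32] |= (1u << (i % 32));
    }
    frag_Z[i] = std::max(z, threshold);
  }
}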
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_planar_complex.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations on planar-complex arrays */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/complex.h" #include "cutlass/array_planar_complex.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to arrays of planar-complex elements. /// /// D = alpha * accumulator + beta * source + uniform /// /// Note, as with most CUTLASS components for planar complex, the template arguments describe /// the underlying real data type. 
template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationPlanarComplex { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; using FragmentOutput = ArrayPlanarComplex<ElementOutput, kCount>; using FragmentAccumulator = ArrayPlanarComplex<ElementAccumulator, kCount>; using ComputeFragment = ArrayPlanarComplex<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { complex<ElementCompute> alpha; ///< scales accumulators complex<ElementCompute> beta; ///< scales source tensor complex<ElementCompute> const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory complex<ElementCompute> const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( complex<ElementCompute> alpha, complex<ElementCompute> beta ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( complex<ElementCompute> const *alpha_ptr, complex<ElementCompute> const *beta_ptr ): alpha(complex<ElementCompute>()), beta(complex<ElementCompute>()), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // complex<ElementCompute> alpha_; complex<ElementCompute> beta_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationPlanarComplex(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return beta_.real() != ElementCompute(0) || beta_.imag() != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_source( source_converter(source.real), source_converter(source.imag)); ComputeFragment converted_accumulator( accumulator_converter(accumulator.real), accumulator_converter(accumulator.imag)); // Perform binary operations ComputeFragment intermediate; multiplies<Array<ElementCompute, kCount> > mul_op; multiply_add<Array<ElementCompute, kCount> > mul_add_op; // complex multiply: I = beta * C intermediate.real = mul_op(beta_.real(), converted_source.real); intermediate.imag = mul_op(beta_.real(), converted_source.imag); intermediate.real = mul_add_op(-beta_.imag(), converted_source.imag, intermediate.real); intermediate.imag = mul_add_op( beta_.imag(), converted_source.real, intermediate.imag); // complex multiply-add: I = alpha * AB + I intermediate.real = mul_add_op(alpha_.real(), converted_accumulator.real, intermediate.real); intermediate.imag = mul_add_op(alpha_.real(), converted_accumulator.imag, intermediate.imag); intermediate.real = mul_add_op(-alpha_.imag(), converted_accumulator.imag, intermediate.real); intermediate.imag = mul_add_op( alpha_.imag(), converted_accumulator.real, intermediate.imag); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return FragmentOutput( destination_converter(intermediate.real), destination_converter(intermediate.imag)); } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_accumulator( accumulator_converter(accumulator.real), accumulator_converter(accumulator.imag)); // Perform binary operations ComputeFragment intermediate; multiplies<Array<ElementCompute, kCount> > mul_op; multiply_add<Array<ElementCompute, kCount> > mul_add_op; // complex multiply-add: I = alpha * AB + I intermediate.real = mul_add_op(alpha_.real(), converted_accumulator.real); intermediate.imag = mul_add_op(alpha_.real(), converted_accumulator.imag); intermediate.real = mul_add_op(-alpha_.imag(), converted_accumulator.imag, intermediate.real); intermediate.imag = mul_add_op( alpha_.imag(), converted_accumulator.real, intermediate.imag); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return FragmentOutput( destination_converter(intermediate.real), destination_converter(intermediate.imag)); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // 
namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
9,351
C
38.294117
129
0.666667
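LinearCombinationPlanarComplex forms D = alpha * AB + beta * C with complex-valued alpha and beta while keeping real and imaginary parts in separate planes, expanding the complex products exactly as the mul_op/mul_add_op sequence above does. A scalar host-side sketch using std::complex for clarity (the planar split is modeled by the separate real/imag arguments):

#include <complex>

// Per-element model of the planar-complex epilogue: D = alpha * AB + beta * C,
// with real and imaginary parts read from and written to separate planes.
void planar_complex_epilogue(float ab_real, float ab_imag,
                             float c_real, float c_imag,
                             std::complex<float> alpha, std::complex<float> beta,
                             float& d_real, float& d_imag) {
  std::complex<float> ab(ab_real, ab_imag);
  std::complex<float> c(c_real, c_imag);
  std::complex<float> d = alpha * ab + beta * c;
  d_real = d.real();   // written to the real plane
  d_imag = d.imag();   // written to the imaginary plane
}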
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/activation.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This extends the contents of cutlass/functional.h with frequently used activation functions. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/constants.h" #include "cutlass/complex.h" #include "cutlass/array.h" #include "cutlass/half.h" #include "cutlass/functional.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct LinearCombinationGenericParams { T alpha; ///< scales accumulators T beta; ///< scales source tensor T const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory T const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE LinearCombinationGenericParams(): alpha(T(1)), beta(T(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE LinearCombinationGenericParams( T alpha, T beta = T(0) ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE LinearCombinationGenericParams( T const *alpha_ptr, T const *beta_ptr = nullptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Identity operator template <typename T> struct Identity { static const bool kIsHeavy=false; CUTLASS_HOST_DEVICE T operator()(T value) const { return value; } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T const &value, Params const &params_) const { return this->operator()(value); } }; template <typename T, int N> struct Identity<Array<T, N> > { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { return value; } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, Params const &params_) const { return this->operator()(value); } }; /// ReLu operator - propagates NaNs /// Always put threshold in the right hand side of max to propagate NaN. 
template <typename T> struct ReLu { static const bool kIsHeavy=false; CUTLASS_HOST_DEVICE T operator()(T const & threshold, T value) const { maximum<T> mx; return mx(value, threshold); } CUTLASS_HOST_DEVICE T operator()(T value) const { maximum<T> mx; return mx(value, T(0)); } /// Host-constructable parameters structure using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T value, Params const &params_) const { return this->operator()(value); } }; template <typename T, int N> struct ReLu<Array<T, N>> { static const bool kIsHeavy=false; CUTLASS_HOST_DEVICE Array<T, N> operator()(T const & threshold, Array<T, N> const &frag) const { maximum<Array<T, N> > mx; return mx(frag, threshold); } CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &frag) const { maximum<Array<T, N> > mx; return mx(frag, T(0)); } /// Host-constructable parameters structure using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &frag, Params const &params_) const { return this->operator()(frag); } }; // Leaky Relu operator template <typename T> struct LeakyReLU { struct Params: LinearCombinationGenericParams<T> { T leaky_alpha; ///< leaky_alpha // Methods using LinearCombinationGenericParams<T>::LinearCombinationGenericParams; CUTLASS_HOST_DEVICE Params(): LinearCombinationGenericParams<T>(), leaky_alpha(T(1)) {} CUTLASS_HOST_DEVICE Params( T alpha, T beta, T leaky_alpha = T(1) ): LinearCombinationGenericParams<T>(alpha, beta), leaky_alpha(leaky_alpha) {} }; CUTLASS_HOST_DEVICE T operator()(T const &value, T const & alpha_recip) const { T res = value > T(0) ? value : value * alpha_recip; return res; } CUTLASS_HOST_DEVICE T operator()(T const &value, Params const &params_) const { this->operator()(value, params_.leaky_alpha); } }; template <typename T, int N> struct LeakyReLU<Array<T, N> > { struct Params: LinearCombinationGenericParams<T> { T leaky_alpha; ///< leaky_alpha using LinearCombinationGenericParams<T>::LinearCombinationGenericParams; // Methods CUTLASS_HOST_DEVICE Params(): LinearCombinationGenericParams<T>(), leaky_alpha(T(1)) {} CUTLASS_HOST_DEVICE Params( T alpha, T beta, T leaky_alpha = T(1) ): LinearCombinationGenericParams<T>(alpha, beta), leaky_alpha(leaky_alpha) {} }; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, T const & alpha_recip) const { Array<T, N> y; LeakyReLU<T> leaky_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < int(value.size()); ++i) { y[i] = leaky_op(value[i], alpha_recip); } return y; } CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, Params const &params_) const { return this->operator()(value, params_.leaky_alpha); } }; // Tanh operator template <typename T> struct Tanh { CUTLASS_HOST_DEVICE T operator()(T const &scalar) const { return fast_tanh(scalar); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T const &scalar, Params const &params_) const { return this->operator()(scalar); } }; template <typename T, int N> struct Tanh<Array<T, N> > { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; Tanh<T> tanh_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = tanh_op(value[i]); } return y; } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, Params const &params_) const { return this->operator()(value); } }; template <int N> struct Tanh<Array<half_t, N>> { using T = half_t; CUTLASS_HOST_DEVICE 
Array<T, N> operator()(Array<T, N> const& z) const { fast_tanh_op<Array<T, N>> tanh; return tanh(z); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, Params const &params_) const { return this->operator()(value); } }; // Sigmoid operator template <typename T> struct Sigmoid { CUTLASS_HOST_DEVICE T operator()(T const &scalar) const { return T(1) / (T(1) + fast_exp(-scalar)); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T const &scalar, Params const &params_) const { return this->operator()(scalar); } }; template <typename T, int N> struct Sigmoid<Array<T, N> > { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; Sigmoid<T> sigmoid_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = sigmoid_op(value[i]); } return y; } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, Params const &params_) const { return this->operator()(value); } }; template <int N> struct Sigmoid<Array<half_t, N>> { using T = half_t; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const& z) const { plus<Array<T, N>> add; #if defined(CUTLASS_USE_TANH_FOR_SIGMOID) multiplies<Array<T, N>> mul; fast_tanh_op<Array<T, N>> tanh; return mul(add(tanh(mul(z, cutlass::constants::half<T>())), cutlass::constants::one<T>()), cutlass::constants::half<T>()); #else divides<Array<T, N>> div; negate<Array<T, N>> neg; fast_exp_op<Array<T, N>> fast_exp; return div(cutlass::constants::one<T>(), add(cutlass::constants::one<T>(), fast_exp(neg(z)))); #endif } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &z, Params const &params_) const { return this->operator()(z); } }; // SiLu (swish) operator introduced by Elfwing et al. in the following paper // "Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning" (2017) // https://arxiv.org/pdf/1702.03118.pdf // It is used in EfficientNet and YOLOv5, for example. // Reference: https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html template <typename T> struct SiLu { CUTLASS_HOST_DEVICE T operator()(T const &scalar) const { Sigmoid<T> sigmoid; return scalar * sigmoid(scalar); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T const &scalar, Params const &params_) const { return this->operator()(scalar); } }; template <typename T, int N> struct SiLu<Array<T, N>> { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Sigmoid<Array<T, N>> sigmoid_op; multiplies<Array<T, N>> mul; return mul(value, sigmoid_op(value)); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, Params const &params_) const { return this->operator()(value); } }; // Hardswish operator introduced by Howard et al. in the following paper // "Searching for MobileNetV3" (2019) // https://arxiv.org/pdf/1905.02244.pdf // It is used in models based on MobilenetNetV3. 
// Reference: https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html template <typename T> struct HardSwish { CUTLASS_HOST_DEVICE T operator()(T const &x) const { minimum<T> mn; maximum<T> mx; T relu6 = mn(mx(x + T(3), T(0)), T(6)); return x * relu6 / T(6); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T const &x, Params const &params_) const { return this->operator()(x); } }; template <> struct HardSwish<float> { using T = float; CUTLASS_HOST_DEVICE T operator()(T const &x) const { minimum<T> mn; maximum<T> mx; T relu6 = mn(mx(x + T(3), T(0)), T(6)); return x * relu6 * 0.16666667f; } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T const &x, Params const &params_) const { return this->operator()(x); } }; template <typename T, int N> struct HardSwish<Array<T, N> > { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; HardSwish<T> hardswish_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = hardswish_op(value[i]); } return y; } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &x, Params const &params_) const { return this->operator()(x); } }; template <int N> struct HardSwish<Array<half_t, N> > { using T = half_t; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { minimum<Array<T, N> > mn; maximum<Array<T, N> > mx; multiplies<Array<T, N> > mul; plus<Array<T, N> > add; return mul(mul(mn(mx(add(value, T(3)), T(0)), T(6)), value), T(0.16666667f)); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &x, Params const &params_) const { return this->operator()(x); } }; // // GELU function definitions implemented as described by // Hendrycks, D., and Gimpel, K. in // "Gaussian Error Linear Units (GELUs)." (2020) // https://arxiv.org/pdf/1606.08415.pdf // // Floating-point constants are Taylor coefficients described in the paper. 
// // GELU operator template <typename T> struct GELU { CUTLASS_HOST_DEVICE T operator()(T const &scalar) const { return T(cutlass::constants::half<T>() * scalar * (cutlass::constants::one<T>() + (T)erff((float)(scalar / cutlass::constants::root_two<T>())))); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T const &scalar, Params const &params_) const { return this->operator()(scalar); } }; template <> struct GELU<float> { CUTLASS_HOST_DEVICE float operator()(float const &scalar) const { return cutlass::constants::half<float>() * scalar * (cutlass::constants::one<float>() + erff( scalar / cutlass::constants::root_two<float>() )); } using Params = LinearCombinationGenericParams<float>; CUTLASS_HOST_DEVICE float operator()(float const &scalar, Params const &params_) const { return this->operator()(scalar); } }; template <> struct GELU<double> { CUTLASS_HOST_DEVICE double operator()(double const &scalar) const { return cutlass::constants::half<double>() * scalar * (cutlass::constants::one<double>() + erf( scalar / cutlass::constants::root_two<double>() )); } using Params = LinearCombinationGenericParams<double>; CUTLASS_HOST_DEVICE double operator()(double const &scalar, Params const &params_) const { return this->operator()(scalar); } }; template <typename T, int N> struct GELU<Array<T, N> > { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; GELU<T> gelu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = gelu_op(value[i]); } return y; } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, Params const &params_) const { return this->operator()(value); } }; // GELU operator implemented using the Taylor series approximation template <typename T> struct GELU_taylor { static const bool kIsHeavy=true; CUTLASS_HOST_DEVICE T operator()(T const &z) const { T k0 = T(0.7978845608028654); T k1 = T(0.044715); return T(cutlass::constants::half<T>() * z * (cutlass::constants::one<T>() + fast_tanh(k0 * z * (cutlass::constants::one<T>() + k1 * z * z)))); } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE T operator()(T const &scalar, Params const &params_) const { return this->operator()(scalar); } }; template <int N> struct GELU_taylor<Array<half_t, N> > { static const bool kIsHeavy=true; CUTLASS_HOST_DEVICE Array<half_t, N> operator()(Array<half_t, N> const &z) const { using T = half_t; Array<half_t, N> y; half_t k0 = half_t(0.7978845608028654); half_t k1 = half_t(0.044715); multiply_add<Array<half_t, N>> fma; multiplies<Array<half_t, N>> mul; plus<Array<half_t, N>> add; fast_tanh_op<Array<half_t, N>> tanh; Array<half_t, N> u = mul(mul(k0, z), fma(mul(k1, z), z, cutlass::constants::one<T>())); y = mul(mul(z, cutlass::constants::half<T>()), add(cutlass::constants::one<T>(), tanh(u))); return y; } using Params = LinearCombinationGenericParams<half_t>; CUTLASS_HOST_DEVICE Array<half_t, N> operator()(Array<half_t, N> const &value, Params const &params_) const { return this->operator()(value); } }; template <typename T, int N> struct GELU_taylor<Array<T, N> > { static const bool kIsHeavy=true; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value) const { Array<T, N> y; GELU_taylor<T> gelu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = gelu_op(value[i]); } return y; } using Params = LinearCombinationGenericParams<T>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &value, Params const 
&params_) const { return this->operator()(value); } }; /// Computes backwards pass for GELU operator assuming d_t is the layer gradient and /// z is computed from the forward pass. template <typename T> struct dGELU { CUTLASS_HOST_DEVICE T operator()(T const &d_t, T const &z) const { T k0 = T(0.7978845608028654); T k1 = T(0.044715); T k2 = T(0.1070322243); T tanh_out = fast_tanh(k0 * z * (1 + k1 * z * z)); T ff = constants::half<T>() * z * ((1 - tanh_out * tanh_out) * (k0 + k2 * z * z)) + constants::half<T>() * (1 + tanh_out); return ff * d_t; } }; template <typename T, int N> struct dGELU<Array<T, N> > { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &d_t, Array<T, N> const &z) const { Array<T, N> y; dGELU<T> gelu_op; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { y[i] = gelu_op(d_t[i], z[i]); } return y; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
18,909
C
25.784703
109
0.629965
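The activation functors above share one calling convention: a scalar operator(), an Array<T, N> specialization that applies it lane by lane (or with vectorized fast math for half_t), and an optional overload taking a Params struct. A minimal host-side sketch follows; the file name activation_demo.cpp is hypothetical, the extra includes are defensive in case activation.h does not pull them in itself, and it assumes the library's include/ directory is on the compiler's include path.

// activation_demo.cpp -- illustrative host-side smoke test of the functors above (not part of CUTLASS).
#include <cstdio>

#include "cutlass/array.h"
#include "cutlass/constants.h"       // defensive: constants used by the GELU functors
#include "cutlass/fast_math.h"       // defensive: fast_tanh / fast_exp used by Tanh and Sigmoid
#include "cutlass/functional.h"      // defensive: maximum<>, multiplies<>, ...
#include "cutlass/epilogue/thread/activation.h"

int main() {
  using namespace cutlass::epilogue::thread;

  // Scalar ReLU: negative inputs are clamped to zero.
  ReLu<float> relu;
  std::printf("relu(-2.5) = %f\n", relu(-2.5f));

  // The Array specialization of Sigmoid applies the scalar functor lane by lane.
  Sigmoid<cutlass::Array<float, 4> > sigmoid;
  cutlass::Array<float, 4> frag;
  for (int i = 0; i < 4; ++i) {
    frag[i] = float(i) - 1.5f;
  }
  cutlass::Array<float, 4> s = sigmoid(frag);
  for (int i = 0; i < 4; ++i) {
    std::printf("sigmoid(%f) = %f\n", frag[i], s[i]);
  }

  // Tanh-based GELU approximation on a scalar.
  GELU_taylor<float> gelu;
  std::printf("gelu_taylor(1.0) = %f\n", gelu(1.0f));

  return 0;
}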
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_clamp.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear scaling operations used by epilogues. Values are clamped before converting to the output element type. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { /// Single source of truth for whether to unroll for `LinearCombinationClamp()` constexpr bool LinearCombinationClampIsHeavy() { return false; } } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements then clamps the output before /// converting to the output element type. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationClamp { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; static bool const kIsHeavy = detail::LinearCombinationClampIsHeavy(); /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha ): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationClamp(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source, ElementCompute uniform = ElementCompute(0)) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_source = source_converter(source); ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_add_source; multiply_add<ComputeFragment> mul_add_accumulator; minimum<ComputeFragment> min_accumulator; maximum<ComputeFragment> max_accumulator; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } /// Clamping constant value ElementCompute const kClampMax = ElementCompute(platform::numeric_limits<ElementOutput>::max()); ElementCompute const kClampMin = ElementCompute(platform::numeric_limits<ElementOutput>::lowest()); intermediate = max_accumulator(intermediate, kClampMin); intermediate = min_accumulator(intermediate, kClampMax); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_accumulator; minimum<ComputeFragment> min_accumulator; maximum<ComputeFragment> max_accumulator; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } /// Clamping constant value ElementCompute const kClampMax = ElementCompute(platform::numeric_limits<ElementOutput>::max()); ElementCompute const kClampMin = ElementCompute(platform::numeric_limits<ElementOutput>::lowest()); intermediate = max_accumulator(intermediate, kClampMin); intermediate = min_accumulator(intermediate, kClampMax); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return 
destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Conditional guards to enable partial specialization for packed integers #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2))) /// Applies a linear combination operator to an array of elements then clamps the output before /// converting to the output element type. /// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ScaleType::Kind Scale, ///< Control Alpha and Beta scaling FloatRoundStyle Round > class LinearCombinationClamp<ElementOutput_, Count, int, float, Scale, Round> { public: using ElementOutput = ElementOutput_; using ElementAccumulator = int; using ElementCompute = float; static_assert( platform::numeric_limits<ElementOutput>::is_integer, "This elementwise op expects the output to be int."); static int const kCount = Count; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; static bool const kIsHeavy = detail::LinearCombinationClampIsHeavy(); /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha ): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationClamp(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source, ElementCompute uniform = ElementCompute(0)) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_source = source_converter(source); ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Compute linear scaling in floating point ComputeFragment intermediate; multiplies<ComputeFragment> mul_add_source; multiply_add<ComputeFragment> mul_add_accumulator; // Float min-max if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()(FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Compute linear scaling in floating point ComputeFragment intermediate; multiplies<ComputeFragment> mul_add_accumulator; // Float min-max if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Convert floats back to INT FragmentAccumulator scaled_accumulator; NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter; scaled_accumulator = compute_converter(intermediate); // Convert to destination numeric type NumericArrayConverter<ElementOutput, int, kCount, Round> destination_converter; return destination_converter(scaled_accumulator); } }; #endif // Conditional guards to enable partial specialization for packed integers //////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements then clamps /// the output before converting to the output element type. 
/// /// D = alpha * accumulator + beta * source + uniform /// /// Note: The below method only when problem_size_K <= 256 for signed int8 gemm /// or problem_size_K <= 128 for unsigned int8 gemm. The default approach is /// above. /// TODO: Add logic to fallback to the default approach template < /// Data type used to load and store< tensors typename ElementOutput_, /// Number of elements computed per operation int Count, ///< Control Alpha and Beta scaling ScaleType::Kind Scale = ScaleType::Default, /// Rounding mode FloatRoundStyle Round = FloatRoundStyle::round_to_nearest> class FastLinearCombinationClamp { public: using ElementOutput = ElementOutput_; using ElementAccumulator = int; using ElementCompute = float; static_assert( platform::numeric_limits<ElementOutput>::is_integer, "This elementwise op expects the output to be int."); static int const kCount = Count; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; static bool const kIsHeavy = false; /// Host-constructable parameters structure struct Params { /// scales accumulators ElementCompute alpha; /// scales source tensor ElementCompute beta; /// pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *alpha_ptr; /// pointer to source scalar - if not null, loads it from memory ElementCompute const *beta_ptr; // // Methods // CUTLASS_HOST_DEVICE Params() : alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) {} CUTLASS_HOST_DEVICE Params(ElementCompute alpha, ElementCompute beta) : alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) {} CUTLASS_HOST_DEVICE Params(ElementCompute alpha) : alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) {} CUTLASS_HOST_DEVICE Params(ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr) : alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {} CUTLASS_HOST_DEVICE Params(ElementCompute const *alpha_ptr) : alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) {} }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; public: /// Constructs the function object, possibly loading from pointers in host /// memory CUTLASS_HOST_DEVICE FastLinearCombinationClamp(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()(FragmentAccumulator const &accumulator, FragmentOutput const &source, ElementCompute uniform = ElementCompute(0)) const { // Convert source to interal compute numeric type FastNumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; FastNumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_source = source_converter(source); ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Compute linear scaling in floating point ComputeFragment intermediate; multiplies<ComputeFragment> mul_add_source; multiply_add<ComputeFragment> mul_add_accumulator; minimum<ComputeFragment> min_accumulator; maximum<ComputeFragment> max_accumulator; // Float min-max if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } /// Clamping constant value ElementCompute const kClamp = ElementCompute(1 << (sizeof_bits<ElementOutput>::value - 1)); intermediate = max_accumulator(intermediate, -kClamp); intermediate = min_accumulator(intermediate, kClamp - ElementCompute(1)); // Convert to destination numeric type FastNumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()(FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type FastNumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Compute linear scaling in floating point ComputeFragment intermediate; multiplies<ComputeFragment> mul_accumulator; minimum<ComputeFragment> min_accumulator; maximum<ComputeFragment> max_accumulator; // Float min-max if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); } /// Clamping constant value ElementCompute const kClamp = ElementCompute(1 << (sizeof_bits<ElementOutput>::value - 1)); intermediate = max_accumulator(intermediate, -kClamp); intermediate = min_accumulator(intermediate, kClamp - ElementCompute(1)); // Convert to destination numeric type FastNumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } }; //////////////////////////////////////////////////////////////////////////////// } 
// namespace thread } // namespace epilogue } // namespace cutlass
23,649
C
33.07781
150
0.672629
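Because every method of LinearCombinationClamp is CUTLASS_HOST_DEVICE, the functor can be exercised directly on host fragments, which is a convenient way to observe the clamp in isolation. The sketch below is illustrative only: the instantiation (int8_t output, int32_t accumulators, float compute, four elements per access), the alpha/beta values, and the defensive scale_type.h include are assumptions, and in a real kernel the epilogue iterators supply the fragments.

// clamp_demo.cpp -- illustrative host-side use of LinearCombinationClamp (not part of CUTLASS).
#include <cstdint>
#include <cstdio>

#include "cutlass/array.h"
#include "cutlass/epilogue/thread/scale_type.h"  // defensive: provides ScaleType used in the template defaults
#include "cutlass/epilogue/thread/linear_combination_clamp.h"

int main() {
  // int8_t output, 4 elements per operation, int32_t accumulators, float compute type (assumed instantiation).
  using Epilogue = cutlass::epilogue::thread::LinearCombinationClamp<int8_t, 4, int32_t, float>;

  Epilogue::Params params(0.5f /*alpha*/, 1.0f /*beta*/);
  Epilogue op(params);

  Epilogue::FragmentAccumulator accum;   // Array<int32_t, 4>
  Epilogue::FragmentOutput source;       // Array<int8_t, 4>
  for (int i = 0; i < 4; ++i) {
    accum[i] = 300 * (i + 1);            // large enough that alpha * accum exceeds the int8_t range
    source[i] = int8_t(10);
  }

  // D = clamp(alpha * accum + beta * source) converted to int8_t; every lane saturates at 127 here.
  Epilogue::FragmentOutput d = op(accum, source);
  for (int i = 0; i < 4; ++i) {
    std::printf("d[%d] = %d\n", i, int(d[i]));
  }
  return 0;
}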
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_leaky_relu.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/scale_type.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationLeakyRelu { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta_bias; ///< scales bias tensor ElementCompute leaky_alpha; ///< leaky_alpha // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta_bias(ElementCompute(0)), leaky_alpha(ElementCompute(1)) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta_bias, ElementCompute leaky_alpha = ElementCompute(1) ): alpha(alpha), beta_bias(beta_bias), leaky_alpha(leaky_alpha) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_bias_; ElementCompute leaky_alpha_recip_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationLeakyRelu(Params const &params) { alpha_ = (params.alpha); beta_bias_ = (params.beta_bias); leaky_alpha_recip_ = (ElementCompute(params.leaky_alpha)); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_bias_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition) { if (k_partition) { beta_bias_ = ElementCompute(1); } } CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_bias_ = ElementCompute(1); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_source = source_converter(source); ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_add_source; multiply_add<ComputeFragment> mul_add_accumulator; LeakyReLU<ComputeFragment> leakyrelu; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = 
mul_add_source(beta_bias_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X } // Compute threshold optionally intermediate = leakyrelu(intermediate, leaky_alpha_recip_); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_accumulator; LeakyReLU<ComputeFragment> leakyrelu; //printf("in doing with bias"); if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum } // Compute threshold optionally intermediate = leakyrelu(intermediate, leaky_alpha_recip_); // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; return destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
8,344
C
35.125541
116
0.661074
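A minimal host-side sketch of the functor above, with assumed element types, fragment width, and alpha/beta_bias/leaky_alpha values; note that Params carries leaky_alpha by value rather than through a pointer.

// leaky_relu_demo.cpp -- illustrative host-side use of LinearCombinationLeakyRelu (not part of CUTLASS).
#include <cstdio>

#include "cutlass/array.h"
#include "cutlass/epilogue/thread/linear_combination_leaky_relu.h"

int main() {
  // float accumulators and outputs, 4 elements per operation (assumed instantiation).
  using Epilogue = cutlass::epilogue::thread::LinearCombinationLeakyRelu<float, 4>;

  Epilogue::Params params(1.0f /*alpha*/, 0.0f /*beta_bias*/, 0.1f /*leaky_alpha*/);
  Epilogue op(params);

  Epilogue::FragmentAccumulator accum;
  Epilogue::FragmentOutput source;
  for (int i = 0; i < 4; ++i) {
    accum[i] = float(i) - 2.0f;   // mix of negative and positive values
    source[i] = 0.0f;
  }

  // D = LeakyReLU(alpha * accum + beta_bias * source): negative lanes are scaled by leaky_alpha.
  Epilogue::FragmentOutput d = op(accum, source);
  for (int i = 0; i < 4; ++i) {
    std::printf("%f -> %f\n", accum[i], d[i]);
  }
  return 0;
}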
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_drelu.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with a maximum operation used by epilogues. */ #pragma once #include <cutlass/half.h> #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementCompute_, ///< Data type returned by this functor typename ElementAccumulator_, ///< Data type of accumulators typename ElementSource_, ///< Data type of source tensor typename ElementTensor_, ///< Data type of additional tensor int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationDRelu { public: using ElementOutput = ElementSource_; using ElementCompute = ElementCompute_; using ElementAccumulator = ElementAccumulator_; using ElementSource = ElementSource_; using ElementTensor = ElementTensor_; static int const kCount = Count; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentSource = Array<ElementSource, kCount>; using FragmentTensor = Array<ElementTensor, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute threshold; ///< minimum value that is output ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), threshold(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta, ElementCompute threshold = ElementCompute(0) ): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr, ElementCompute threshold = ElementCompute(0) ): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementTensor threshold_; bool participates_in_reduction_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationDRelu(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); threshold_ = ElementTensor(params.threshold); participates_in_reduction_ = true; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return beta_ != ElementCompute(0); } /// Returns true if the threadblock computes the reduction CUTLASS_HOST_DEVICE bool participates_in_reduction() const { return participates_in_reduction_; } /// Functionally required for serial reduction in the epilogue CUTLASS_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // set to NaN to make ReLU no-op for all except last k partitions int64_t allones = -1; threshold_ = reinterpret_cast<ElementTensor const &>(allones); participates_in_reduction_ = false; } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentCompute operator()( FragmentAccumulator const &accumulator, FragmentSource const &source, FragmentTensor const &tensor) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; intermediate = mul_add_source(beta_, converted_source); // X = beta * C intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X // dReLU = (cond ? dy : 0) CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { ElementTensor cond = tensor[i]; if (cond <= threshold_) { intermediate[i] = ElementCompute(); } } return intermediate; } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentCompute operator()( FragmentAccumulator const &accumulator, FragmentTensor const &tensor) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum // dReLU = (cond ? dy : 0) CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { ElementTensor cond = tensor[i]; if (cond <= threshold_) { intermediate[i] = ElementCompute(); } } return intermediate; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementCompute_, ///< Data type returned by this functor typename ElementAccumulator_, ///< Data type of accumulators typename ElementSource_, ///< Data type of source tensor int Count, ///< Number of elements computed per operation FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationDReluConditionalBits { public: using ElementOutput = ElementSource_; using ElementCompute = ElementCompute_; using ElementAccumulator = ElementAccumulator_; using ElementSource = ElementSource_; using ElementTensor = uint1b_t; static bool const kIsHeavy = false; static int const kCount = Count; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentSource = Array<ElementSource, kCount>; using FragmentTensor = Array<ElementTensor, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; FragmentTensor predicate_mask_; bool participates_in_reduction_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationDReluConditionalBits(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); participates_in_reduction_ = true; predicate_mask_.clear(); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return beta_ != ElementCompute(0); } /// Returns true if the threadblock computes the reduction CUTLASS_HOST_DEVICE bool participates_in_reduction() const { return participates_in_reduction_; } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { predicate_mask_.clear(); if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // Avoid computing the reduction if this isn't the final Split-K slice participates_in_reduction_ = false; bit_not<FragmentTensor> not_op; predicate_mask_ = not_op(predicate_mask_); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_DEVICE FragmentCompute operator()( FragmentAccumulator const &accumulator, FragmentSource const &source, FragmentTensor const &tensor) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X bit_or<FragmentTensor> or_op; FragmentTensor predicates = or_op(tensor, predicate_mask_); // Obtain from packed bits bool conditions[kCount]; UnpackPredicates<kCount> unpack_predicates; unpack_predicates(conditions, predicates); // dReLU = (cond ? dy : 0) CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { if (!conditions[i]) { intermediate[i] = ElementCompute(); } } return intermediate; } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentCompute operator()( FragmentAccumulator const &accumulator, FragmentTensor const &tensor) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum bit_or<FragmentTensor> or_op; FragmentTensor predicates = or_op(tensor, predicate_mask_); // Obtain from packed bits bool conditions[kCount]; UnpackPredicates<kCount> unpack_predicates; unpack_predicates(conditions, predicates); // dReLU = (cond ? dy : 0) CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { if (!conditions[i]) { intermediate[i] = ElementCompute(); } } return intermediate; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
15,195
C
32.545254
129
0.650214
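The first functor above, LinearCombinationDRelu, scales the incoming gradient, optionally adds the scaled source, and then zeroes every lane whose entry in the forward-activation tensor is at or below the threshold. The host-side sketch below is illustrative; the all-float instantiation, the width of four, and the sample values are assumptions.

// drelu_demo.cpp -- illustrative host-side use of LinearCombinationDRelu (not part of CUTLASS).
#include <cstdio>

#include "cutlass/array.h"
#include "cutlass/epilogue/thread/linear_combination_drelu.h"

int main() {
  // ElementCompute, ElementAccumulator, ElementSource, ElementTensor, Count (assumed all-float instantiation).
  using Epilogue = cutlass::epilogue::thread::LinearCombinationDRelu<float, float, float, float, 4>;

  Epilogue::Params params(1.0f /*alpha*/, 0.0f /*beta*/, 0.0f /*threshold*/);
  Epilogue op(params);

  Epilogue::FragmentAccumulator dy;   // incoming layer gradient
  Epilogue::FragmentSource source;
  Epilogue::FragmentTensor z;         // values saved from the forward pass
  for (int i = 0; i < 4; ++i) {
    dy[i] = 1.0f;
    source[i] = 0.0f;
    z[i] = float(i) - 1.5f;           // lanes 0 and 1 fall at or below the threshold
  }

  // dX[i] = (z[i] > threshold) ? alpha * dy[i] + beta * source[i] : 0
  Epilogue::FragmentCompute dx = op(dy, source, z);
  for (int i = 0; i < 4; ++i) {
    std::printf("z = %f -> dx = %f\n", z[i], dx[i]);
  }
  return 0;
}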
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/scale_type.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Enum defines the behaviors of the epilogue. */ #pragma once #include "cutlass/cutlass.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specifies internal data type for computation struct ScaleType { enum Kind { Default, // alpha x C + beta x D NoBetaScaling, // alpha x C + D OnlyAlphaScaling, // alpha x C OnlyAlphaPerChannelScaling, // alpha_vec x C Nothing // C }; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
2,656
C
41.174603
100
0.604669
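The Scale kind is a compile-time policy consumed by the epilogue functors in this directory; among other things it decides whether the source tensor C is read at all. A small illustrative sketch follows; the LinearCombinationClamp instantiation is an assumption used only to make the effect observable.

// scale_type_demo.cpp -- illustrative effect of ScaleType on an epilogue functor (not part of CUTLASS).
#include <cstdint>
#include <cstdio>

#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"

int main() {
  using cutlass::epilogue::thread::LinearCombinationClamp;
  using cutlass::epilogue::thread::ScaleType;

  // Same functor, two scaling policies (the clamp instantiation itself is an assumption).
  using Default   = LinearCombinationClamp<int8_t, 4, int32_t, float, ScaleType::Default>;
  using AlphaOnly = LinearCombinationClamp<int8_t, 4, int32_t, float, ScaleType::OnlyAlphaScaling>;

  // With Default scaling and beta != 0 the source tensor must be loaded;
  // with OnlyAlphaScaling it is skipped regardless of beta.
  std::printf("Default          needs source: %d\n", int(Default(Default::Params(1.0f, 1.0f)).is_source_needed()));
  std::printf("OnlyAlphaScaling needs source: %d\n", int(AlphaOnly(AlphaOnly::Params(1.0f, 1.0f)).is_source_needed()));
  return 0;
}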
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_hardswish.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with HardSwish operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/linear_combination_generic.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator followed by the HardSwish activation to an array of elements. /// /// D = hardswish(alpha * accumulator + beta * source + uniform) /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > using LinearCombinationHardSwish = LinearCombinationGeneric<HardSwish, ElementOutput_, Count, ElementAccumulator_, ElementCompute_, Scale, Round>; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
3,693
C
51.771428
129
0.610073
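The alias above simply plugs the HardSwish functor from activation.h into the generic linear-combination epilogue, so the element-wise behaviour is easiest to sanity-check on the functor itself. A host-side sketch, illustrative only:

// hardswish_demo.cpp -- illustrative check of the HardSwish functor wired in by the alias above (not part of CUTLASS).
#include <cstdio>

#include "cutlass/array.h"
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/activation.h"

int main() {
  using cutlass::epilogue::thread::HardSwish;

  // Scalar reference: x * relu6(x + 3) / 6.
  HardSwish<float> hswish;
  for (float x : {-4.0f, -1.0f, 0.0f, 1.0f, 4.0f}) {
    std::printf("hardswish(%f) = %f\n", x, hswish(x));
  }

  // The half_t specialization works on whole fragments using the vectorized min/max/multiply helpers.
  HardSwish<cutlass::Array<cutlass::half_t, 8> > hswish_h;
  cutlass::Array<cutlass::half_t, 8> frag;
  for (int i = 0; i < 8; ++i) {
    frag[i] = cutlass::half_t(float(i) - 4.0f);
  }
  cutlass::Array<cutlass::half_t, 8> out = hswish_h(frag);
  for (int i = 0; i < 8; ++i) {
    std::printf("hardswish_h(%f) = %f\n", float(frag[i]), float(out[i]));
  }
  return 0;
}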
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_params.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// struct LinearCombinationParams { uint64_t alpha_data[2]; uint64_t beta_data[2]; CUTLASS_HOST_DEVICE LinearCombinationParams() : alpha_data {0lu, 0lu}, beta_data {0lu, 0lu} { } template <typename ElementCompute> CUTLASS_HOST_DEVICE LinearCombinationParams(ElementCompute alpha, ElementCompute beta) : alpha_data {0lu, 0lu}, beta_data {0lu, 0lu} { #if defined(__CUDA_ARCH__) reinterpret_cast<ElementCompute&>(alpha_data) = alpha; reinterpret_cast<ElementCompute&>(beta_data) = beta; #else memcpy( alpha_data, &alpha, sizeof(ElementCompute) ); memcpy( beta_data, &beta, sizeof(ElementCompute) ); #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
3,058
C
39.249999
100
0.599411
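LinearCombinationParams stores alpha and beta bit-copied into fixed uint64_t words, independent of the compute element type. A small host-only sketch grounded in the constructor above; the round-trip via memcpy mirrors the non-CUDA branch.

// Host-side sketch: store float alpha/beta through the templated constructor,
// then read them back the same way the host path wrote them.
#include <cstring>
#include "cutlass/cutlass.h"   // supplies CUTLASS_HOST_DEVICE used by the params header
#include "cutlass/epilogue/thread/linear_combination_params.h"

int main() {
  float alpha = 2.0f, beta = 0.5f;
  cutlass::epilogue::thread::LinearCombinationParams base(alpha, beta);

  float alpha_out = 0.0f, beta_out = 0.0f;
  std::memcpy(&alpha_out, base.alpha_data, sizeof(float));
  std::memcpy(&beta_out, base.beta_data, sizeof(float));

  return (alpha_out == alpha && beta_out == beta) ? 0 : 1;  // expect 0
}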
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/reduction_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing reduction operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a reduction sum to an array of elements. /// /// template < typename Element_, ///< Data type used to load and store tensors int Count ///< Number of elements computed per operation > class ReductionOpPlus { public: using Element = Element_; static int const kCount = Count; using Fragment = Array<Element, kCount>; using Operator = plus<Fragment>; /// Host-constructable parameters structure struct Params { }; private: /// reduction operator Operator operator_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE ReductionOpPlus(Params const &params) { } /// Computes Compute => CUTLASS_HOST_DEVICE Fragment operator()( Fragment const &lhs, Fragment const &rhs) const { return operator_(lhs, rhs); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
3,416
C
33.867347
100
0.624707
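ReductionOpPlus forwards directly to plus<Fragment>, giving an element-wise sum of two fragments. A host-side sketch grounded in the class above; the float/4 instantiation is an arbitrary choice.

// Host-side sketch: element-wise sum of two 4-element fragments.
#include "cutlass/epilogue/thread/reduction_op.h"

using Reduce = cutlass::epilogue::thread::ReductionOpPlus<float, 4>;

int main() {
  Reduce reduce(Reduce::Params{});   // Params is empty for this functor

  Reduce::Fragment a, b;
  for (int i = 0; i < Reduce::kCount; ++i) { a[i] = float(i); b[i] = 10.0f; }

  Reduce::Fragment c = reduce(a, b);  // c[i] = a[i] + b[i]
  return (c[3] == 13.0f) ? 0 : 1;     // expect 0
}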
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/conversion_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing conversion operations used by epilogues. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Converts the result without other operations /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class Convert { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementAccumulator_; static int const kCount = Count; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = FragmentAccumulator; static FloatRoundStyle const kRound = Round; static bool const kIsHeavy = false; /// Host-constructable parameters structure struct Params { // // Methods // CUTLASS_HOST_DEVICE Params() {} }; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE Convert(Params const &params = Params()) { } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { } /// Returns true if source is needed based on state of runtime arguments CUTLASS_HOST_DEVICE constexpr bool is_source_needed() const { return false; } /// Constexpr function to enable the compiler to optimize away the source loading if it is /// never needed. CUTLASS_HOST_DEVICE constexpr bool is_source_ever_needed() const { return false; } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source = FragmentOutput(), ElementCompute uniform = ElementCompute(0)) const { // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementAccumulator, kCount, Round> destination_converter; return destination_converter(accumulator); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
4,691
C
34.278195
101
0.658708
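Convert performs only numeric conversion: no alpha/beta scaling and no source read, since is_source_needed() is constexpr false. A host-side sketch grounded in the class above, converting float accumulators to half precision.

// Host-side sketch: convert a float accumulator fragment to half_t output.
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/conversion_op.h"

using Converter = cutlass::epilogue::thread::Convert<cutlass::half_t, 4, float>;
static_assert(!Converter::kIsHeavy, "conversion-only epilogues are not heavy");

int main() {
  Converter convert;                  // default Params; nothing to configure
  Converter::FragmentAccumulator acc;
  for (int i = 0; i < Converter::kCount; ++i) { acc[i] = 1.5f * float(i); }

  Converter::FragmentOutput out = convert(acc);       // round-to-nearest per element
  return (out[2] == cutlass::half_t(3.0f)) ? 0 : 1;   // expect 0
}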
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_dgelu.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination followed by dGelu operation */ #pragma once #include <cutlass/half.h> #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/constants.h" #include "cutlass/fast_math.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementCompute_, ///< Data type returned by this functor typename ElementAccumulator_, ///< Data type of accumulators typename ElementSource_, ///< Data type of source tensor typename ElementTensor_, ///< Data type of additional tensor int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationDGelu { public: using ElementOutput = ElementSource_; using ElementCompute = ElementCompute_; using ElementAccumulator = ElementAccumulator_; using ElementSource = ElementSource_; using ElementTensor = ElementTensor_; static bool const kIsHeavy = true; static int const kCount = Count; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentSource = Array<ElementSource, kCount>; using FragmentTensor = Array<ElementTensor, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute threshold; ///< minimum value that is output ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), threshold(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta, ElementCompute threshold = ElementCompute(0) ): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr, ElementCompute threshold = ElementCompute(0) ): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementCompute threshold_; bool participates_in_reduction_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationDGelu(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); threshold_ = params.threshold; participates_in_reduction_ = true; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return beta_ != ElementCompute(0); } /// Returns true if the threadblock computes the reduction CUTLASS_HOST_DEVICE bool participates_in_reduction() const { return participates_in_reduction_; } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // set to NaN to make ReLU no-op for all except last k partitions int64_t allones = -1; threshold_ = reinterpret_cast<ElementCompute const &>(allones); // Avoid computing the reduction if this isn't the final Split-K slice participates_in_reduction_ = false; } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentCompute operator()( FragmentAccumulator const &accumulator, FragmentSource const &source, FragmentTensor const &tensor) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X dGELU<ElementCompute> gelu_op; // dGelu CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { intermediate[i] = gelu_op(intermediate[i], ElementCompute(tensor[i])); } return intermediate; } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentCompute operator()( FragmentAccumulator const &accumulator, FragmentTensor const &tensor) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum dGELU<ElementCompute> gelu_op; // dGelu with conversion CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { intermediate[i] = gelu_op(intermediate[i], ElementCompute(tensor[i])); } return intermediate; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
9,067
C
35.12749
129
0.647292
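LinearCombinationDGelu composes the usual alpha/beta linear combination with the GELU derivative evaluated against an extra tensor operand, which is why it takes separate source and tensor element types. A type-level sketch with illustrative (assumed) half/float choices:

// Hedged sketch: the four leading parameters are compute, accumulator, source,
// and extra-tensor element types; Count = 8 matches 128-bit half_t accesses.
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/linear_combination_dgelu.h"

using DGeluOp = cutlass::epilogue::thread::LinearCombinationDGelu<
    float,            // ElementCompute (also the element type of the result fragment)
    float,            // ElementAccumulator
    cutlass::half_t,  // ElementSource
    cutlass::half_t,  // ElementTensor (dGELU is evaluated against these values)
    8>;               // Count

static_assert(DGeluOp::kIsHeavy, "dGELU epilogues are flagged as heavy");

// With beta != 0 the source operand is required: is_source_needed() returns true.
DGeluOp::Params params(/*alpha=*/1.0f, /*beta=*/1.0f);
DGeluOp op(params);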
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_silu.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with SiLU operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/linear_combination_generic.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator folllowed by the SiLU activation to an array of elements. /// /// D = silu(alpha * accumulator + beta * source + uniform) /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > using LinearCombinationSilu = LinearCombinationGeneric<SiLu, ElementOutput_, Count, ElementAccumulator_, ElementCompute_, Scale, Round, true>; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
3,669
C
51.428571
129
0.608613
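LinearCombinationSilu differs from the HardSwish alias earlier in this listing only in the activation functor and in passing the trailing true (IsHeavy) argument to LinearCombinationGeneric. A minimal sketch with assumed types and alias name:

// Hedged sketch: SiLU epilogue with half output and float accumulation; the
// type choices are illustrative, not taken from this file.
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/linear_combination_silu.h"

using SiluEpilogue = cutlass::epilogue::thread::LinearCombinationSilu<
    cutlass::half_t,  // ElementOutput
    8,                // Count
    float,            // ElementAccumulator
    float>;           // ElementCompute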
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_gelu.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with GELU operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/linear_combination_generic.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator followed by the GELU activation to an array of elements. /// /// D = gelu(alpha * accumulator + beta * source + uniform) /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > using LinearCombinationGELU = LinearCombinationGeneric<GELU, ElementOutput_, Count, ElementAccumulator_, ElementCompute_, Scale, Round, true>; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
3,669
C
50.69014
129
0.60834
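LinearCombinationGELU accepts the same ScaleType knob as plain LinearCombination; NoBetaScaling, for example, keeps the source addend but skips the beta multiply (see the is_source_needed() and operator() logic in linear_combination.h later in this listing). A sketch with assumed types:

// Hedged sketch: GELU epilogue that adds the source tensor unscaled.
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"

using GeluEpilogue = cutlass::epilogue::thread::LinearCombinationGELU<
    cutlass::half_t, 8, float, float,
    cutlass::epilogue::thread::ScaleType::NoBetaScaling>;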
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination_with_elementwise.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination with elementwise */ #pragma once #include <cutlass/half.h> #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/constants.h" #include "cutlass/fast_math.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/activation.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementCompute_, ///< Data type returned by this functor typename ElementAccumulator_, ///< Data type of accumulators typename ElementSource_, ///< Data type of source tensor typename ElementTensor_, ///< Data type of additional tensor int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombinationWithElementwise { public: using ElementOutput = ElementSource_; using ElementCompute = ElementCompute_; using ElementAccumulator = ElementAccumulator_; using ElementSource = ElementSource_; using ElementTensor = ElementTensor_; static bool const kIsHeavy = true; static int const kCount = Count; using FragmentCompute = Array<ElementCompute, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentSource = Array<ElementSource, kCount>; using FragmentTensor = Array<ElementTensor, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute threshold; ///< minimum value that is output ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), threshold(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta, ElementCompute threshold = ElementCompute(0) ): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr, ElementCompute threshold = ElementCompute(0) ): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementCompute threshold_; bool participates_in_reduction_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationWithElementwise(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); threshold_ = params.threshold; participates_in_reduction_ = true; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return beta_ != ElementCompute(0); } /// Returns true if the threadblock computes the reduction CUTLASS_HOST_DEVICE bool participates_in_reduction() const { return participates_in_reduction_; } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { // set to NaN to make ReLU no-op for all except last k partitions int64_t allones = -1; threshold_ = reinterpret_cast<ElementCompute const &>(allones); // Avoid computing the reduction if this isn't the final Split-K slice participates_in_reduction_ = false; } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentCompute operator()( FragmentAccumulator const &accumulator, FragmentSource const &source, FragmentTensor const &tensor) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_add_source; multiply_add<FragmentCompute> mul_add_accumulator; intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X return intermediate; } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentCompute operator()( FragmentAccumulator const &accumulator, FragmentTensor const &tensor) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> mul_accumulator; intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum return intermediate; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
8,662
C
35.86383
129
0.649965
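LinearCombinationWithElementwise accepts the extra tensor operand but, as the operator() bodies above show, only performs the alpha/beta scaling itself; the element-wise transform is applied elsewhere. The host-side sketch below exercises the split-K bookkeeping instead: every slice except the last forces beta to 1 and opts out of the reduction. Types are illustrative.

// Host-side sketch of set_k_partition(): slice 1 of 4 is not the final slice,
// so participates_in_reduction() flips to false. Expect a return value of 0.
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/linear_combination_with_elementwise.h"

using ElementwiseOp = cutlass::epilogue::thread::LinearCombinationWithElementwise<
    float, float, cutlass::half_t, cutlass::half_t, 8>;

int main() {
  ElementwiseOp op(ElementwiseOp::Params(/*alpha=*/1.0f, /*beta=*/0.0f));

  op.set_k_partition(/*k_partition=*/1, /*k_partition_count=*/4);

  return op.participates_in_reduction() ? 1 : 0;
}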
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/thread/linear_combination.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/scale_type.h" #include "cutlass/epilogue/thread/linear_combination_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. /// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation. 
///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class LinearCombination { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = Array<ElementCompute, kCount>; using ParamsBase = LinearCombinationParams; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params : ParamsBase{ ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory CUTLASS_HOST_DEVICE Params(): ParamsBase( ElementCompute(1), ElementCompute(0) ), alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta ): ParamsBase(alpha, beta), alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha ): ParamsBase(alpha, ElementCompute(0)), alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr ): ParamsBase(*alpha_ptr, *beta_ptr), alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr ): ParamsBase(*alpha_ptr, ElementCompute(0)), alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ParamsBase const& base ): ParamsBase(base), alpha_ptr(nullptr), beta_ptr(nullptr) { #if defined(__CUDA_ARCH__) alpha = reinterpret_cast<ElementCompute const&>(base.alpha_data); beta = reinterpret_cast<ElementCompute const&>(base.beta_data); #else memcpy( alpha, base.alpha_data, sizeof(ElementCompute) ); memcpy( beta, base.alpha_data, sizeof(ElementCompute) ); #endif } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombination(Params const &params) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? 
*params.beta_ptr : params.beta); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; ComputeFragment converted_source = source_converter(source); ComputeFragment converted_accumulator = accumulator_converter(accumulator); if (Scale == ScaleType::Nothing) return destination_converter(converted_accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_add_source; multiply_add<ComputeFragment> mul_add_accumulator; if (Scale == ScaleType::NoBetaScaling) intermediate = converted_source; else intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X return destination_converter(intermediate); } /// Computes linear scaling: D = alpha * accumulator CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator) const { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; ComputeFragment converted_accumulator = accumulator_converter(accumulator); if (Scale == ScaleType::Nothing) return destination_converter(converted_accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_accumulator; intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum return destination_converter(intermediate); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
9,349
C
36.103174
129
0.653332
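LinearCombination is the default epilogue functor: D = alpha * accumulator + beta * source, with early-outs for the ScaleType variants. Worth noting in the source above: the host branch of the Params(ParamsBase const&) constructor passes alpha and beta to memcpy by value and copies base.alpha_data into both scalars, so the scalar constructors are the safer path; the host-side sketch below uses them.

// Host-side sketch: 4-element float fragments, alpha = 2, beta = 1.
#include "cutlass/epilogue/thread/linear_combination.h"

using Combine = cutlass::epilogue::thread::LinearCombination<float, 4, float, float>;

int main() {
  Combine combine(Combine::Params(/*alpha=*/2.0f, /*beta=*/1.0f));

  Combine::FragmentAccumulator acc;
  Combine::FragmentOutput source;
  for (int i = 0; i < Combine::kCount; ++i) { acc[i] = float(i); source[i] = 100.0f; }

  Combine::FragmentOutput d = combine(acc, source);  // d[i] = 2 * i + 100
  return (d[3] == 106.0f) ? 0 : 1;                   // expect 0
}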
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/output_iterator_parameter.h
#pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/tensor_ref.h" namespace cutlass { namespace epilogue { namespace threadblock { template< typename TensorLayout_, ///! The original output tensor layout typename OutputIteratorLayout_, ///! Layout used by epilogue output iterator typename TensorRef_, ///! Input tensor to epilogue output iterator conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem > struct ConvOutputIteratorParameter { using TensorLayout = TensorLayout_; using OutputIteratorLayout = OutputIteratorLayout_; using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord; using TensorRef = TensorRef_; static conv::Operator const kConvolutionalOperator = ConvOperator; using ConvProblemSize = ConvProblemSize_; /// Wgrad stride idx for implicit gemm algorithm // Conv2d row-major matrix (KxRSC) // Conv3d row-major matrix (KxTRSC) static int const kWgradStrideIdx = platform::is_same<TensorLayout, layout::TensorNHWC>::value ? 2 : 3; /// This chooses the appropriate stride element of the C tensor. static int const kTensorStrideIdx = (kConvolutionalOperator == conv::Operator::kWgrad ? kWgradStrideIdx : 0); CUTLASS_HOST_DEVICE static OutputIteratorLayout layout(const TensorRef & ref) { return ref.stride(kTensorStrideIdx); } CUTLASS_HOST_DEVICE static OutputTensorCoord extent(ConvProblemSize problem_size) { return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(); } }; template < int InterleavedK, typename TensorRef_, conv::Operator ConvOperator, typename ConvProblemSize_ > struct ConvOutputIteratorParameter< layout::TensorNCxHWx<InterleavedK>, layout::TensorNCxHWx<InterleavedK>, TensorRef_, ConvOperator, ConvProblemSize_> { using TensorLayout = typename layout::TensorNCxHWx<InterleavedK>; using OutputIteratorLayout = typename layout::TensorNCxHWx<InterleavedK>; using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord; using TensorRef = TensorRef_; static conv::Operator const kConvolutionalOperator = ConvOperator; using ConvProblemSize = ConvProblemSize_; CUTLASS_HOST_DEVICE static OutputIteratorLayout layout(const TensorRef & ref) { return ref.stride(); } CUTLASS_HOST_DEVICE static OutputTensorCoord extent(ConvProblemSize problem_size) { return problem_size.output_extent(); } }; } // namespace threadblock } // namespace epilogue } // namespace cutlass
2,912
C
30.32258
103
0.729052
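ConvOutputIteratorParameter maps a convolution output TensorRef onto the layout the epilogue tile iterator expects, selecting the K-major stride only for Wgrad. A compile-time sketch; the RowMajor iterator layout is an assumption about how the implicit-GEMM kernels typically instantiate this trait, not something stated in the file.

// Hedged sketch: fprop with an NHWC output uses the packed innermost stride.
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"

using TensorRefC = cutlass::TensorRef<float, cutlass::layout::TensorNHWC>;

using OutputIteratorParameter = cutlass::epilogue::threadblock::ConvOutputIteratorParameter<
    cutlass::layout::TensorNHWC,   // original output tensor layout
    cutlass::layout::RowMajor,     // layout consumed by the epilogue tile iterator (assumed)
    TensorRefC,
    cutlass::conv::Operator::kFprop,
    cutlass::conv::Conv2dProblemSize>;

static_assert(OutputIteratorParameter::kTensorStrideIdx == 0,
              "fprop and dgrad read stride index 0; only wgrad uses the filter-major stride");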
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h" #include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/threadblock/interleaved_epilogue.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, /// Is for a symmetric kernel BlasMode BlasMode_ = BlasMode::kGemm > struct DefaultEpilogueTensorOpBlas3 { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; static BlasMode const kBlasMode = BlasMode_; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorBlas3< OutputTileThreadMap, ElementOutput, kBlasMode >; using AccumulatorFragmentIterator = typename std::conditional<is_complex<ElementOutput>::value, cutlass::epilogue::warp::FragmentIteratorComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC>, cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC> >::type; /// Support several implementations depending on structure of epilogue using DefaultIterators = detail::DefaultIteratorsTensorOp< ElementOutput, ElementAccumulator, kElementsPerAccess, Shape, typename WarpMmaTensorOp::Shape, typename 
WarpMmaTensorOp::Policy::Operator::Shape, typename OutputTileThreadMap::CompactedThreadMap >; using WarpTileIterator = typename DefaultIterators::WarpTileIterator; using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator; /// Hard-coded padding elements added using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
7,129
C
39.511363
100
0.670501
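DefaultEpilogueTensorOpBlas3 mostly mirrors the regular tensor-op defaults; the BLAS3-specific parts are the PredicatedTileIteratorBlas3 output iterator and the kBlasMode flag. The hard-coded padding rule above is easy to evaluate by hand; below is a tiny compile-time check, assuming the cutlass/matrix_shape.h and cutlass/numeric_types.h include paths.

// Hedged sketch: 64 / sizeof_bits<float> * 4 = 8 columns of shared-memory padding.
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"

using ElementAccumulator = float;
using Padding = cutlass::MatrixShape<
    0, 64 / cutlass::sizeof_bits<ElementAccumulator>::value * 4>;

static_assert(Padding::kColumn == 8, "float accumulators get 8 elements of padding");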
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped complex GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h" #include "cutlass/epilogue/warp/fragment_iterator_gaussian_complex_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" #include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/threadblock/epilogue.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization and defines sensible defaults for epilogues for complex*complex case // 4 real-valued mma operations (Complex) // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Epilouge Shape typename Shape_, /// Warp-level mma operator typename WarpMmaTensorOp_, /// Number of k partitions int PartitionsK, /// Epilogue output operator typename OutputOp_, /// Elements accessed by inner-most loop of AccumulatorFragmentIterator::load() int ElementsPerAccess, /// Multiply-add operator /// Selects between (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_ = arch::OpMultiplyAddComplex > struct DefaultEpilogueComplexTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using Operator = Operator_; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, ElementAccumulator, LayoutC >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added 
using Padding = cutlass::MatrixShape<0, 0>; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization and defines sensible defaults for epilogues for complex*complex case // 3 real-valued mma operations (Gaussian Complex) // A = (ar + j ai), B = (br +j bi), D = AB // P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi) // D = dr + j di = (P1 - P3) + j (P1 + P2) ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueComplexTensorOp <Shape_, WarpMmaTensorOp_, PartitionsK, OutputOp_, ElementsPerAccess, arch::OpMultiplyAddGaussianComplex > { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using Operator = arch::OpMultiplyAddGaussianComplex; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorGaussianComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, ElementAccumulator, LayoutC >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = cutlass::MatrixShape<0, 0>; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
9,142
C
34.714844
103
0.677532
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/interleaved_epilogue.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base_streamk.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator without splitk template < /// Shape of threadblock tile (concept: GemmShape) typename Shape_, /// Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) typename WarpMmaOperator_, /// Number of partitions of the K dimension int PartitionsK, /// Tile iterator reading and writing output tensors typename OutputTileIterator_, /// Fragment iterator selecting accumulators typename AccumulatorFragmentIterator_, /// Output operator typename OutputOp_, /// Number of interleaved k int InterleavedK> class InterleavedEpilogue : public EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_> { public: using BaseStreamK = EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_>; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using OutputTileIterator = OutputTileIterator_; using OutputOp = OutputOp_; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Fragment type used by the accumulator tile's fragment iterator using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment; /// Accumulator element using ElementAccumulator = typename AccumulatorTile::Element; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array<typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<ElementAccumulator, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = gemm::GemmShape<Shape::kM / WarpMmaOperator::Shape::kM, Shape::kN / WarpMmaOperator::Shape::kN, kPartitionsK>; public: static_assert(OutputTileIterator::kElementsPerAccess, "This must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); /// Shared storage allocation needed by the epilogue struct SharedStorage {}; public: /// Constructor CUTLASS_DEVICE InterleavedEpilogue( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID 
of warp within threadblock int lane_idx) ///< Id of thread within warp : BaseStreamK(thread_idx) {} /// Aggregates the accumulator sets shared by peer blocks in the global workspace, /// performing epilogue computations, writing to output CUTLASS_DEVICE void reduce( int peer_idx_begin, int peer_idx_end, int reduce_fragment_idx, void *element_workspace, OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination OutputTileIterator source_iterator) ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) { // Reduce peer accumulator fragments into one fragment AccumulatorFragment accum_fragment; BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace); // Source-fragment data (zero-initialized for scenarios where the // output operator allows us to skip loading it from global input) typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); if (output_op.is_source_needed()) { source_iterator += reduce_fragment_idx; source_iterator.load(source_fragment); } // Compute the output result typename OutputTileIterator::Fragment output_fragment; // Apply the output operator apply_output_operator(output_fragment, output_op, accum_fragment, source_fragment); // Store the final result destination_iterator += reduce_fragment_idx; destination_iterator.store(output_fragment); } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) if (!output_op.is_source_needed()) { compute_source_not_needed_(output_op, destination_iterator, accumulators); } else { compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator); } } /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators ///< Complete warp-level accumulator tile ) { // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Convert fragment // typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; // // Compute the output result // typename OutputTileIterator::Fragment output_fragment; apply_output_operator_source_not_needed(output_fragment, output_op, accum_fragment); // // Store the final result // destination_iterator.set_iteration_index(iter); destination_iterator.store(output_fragment); ++destination_iterator; } } /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) ) { // // Predicated tile iterators constructed from members // typename
OutputTileIterator::Fragment source_fragment; source_fragment.clear(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // source_iterator.set_iteration_index(iter); source_iterator.load(source_fragment); ++source_iterator; // // Convert fragment // typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; // // Compute the output result // typename OutputTileIterator::Fragment output_fragment; apply_output_operator(output_fragment, output_op, accum_fragment, source_fragment); // // Store the final result // destination_iterator.set_iteration_index(iter); destination_iterator.store(output_fragment); ++destination_iterator; } } protected: /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment, typename OutputTileIterator::Fragment const &source_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>( &aligned_accum_fragment); OutputAccessType const *source_frag_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]); } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_source_not_needed( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, typename AccumulatorFragmentIterator::Fragment const &aligned_accum_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>( &aligned_accum_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i]); } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
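// Editor's note: standalone sketch (not CUTLASS code) of what InterleavedEpilogue's
// apply_output_operator() does per access: the accumulator, source, and output fragments are
// viewed as arrays of short vectors and the output functor is applied vector by vector. The
// functor here is assumed to be a plain linear combination, D = alpha * accum + beta * source;
// the fragment and access sizes are made-up example values.
#include <array>
#include <cstdio>

int main() {
  constexpr int kElementsPerAccess  = 4;                                     // one access
  constexpr int kFragmentElements   = 16;                                    // toy fragment
  constexpr int kOutputOpIterations = kFragmentElements / kElementsPerAccess;

  std::array<float, kFragmentElements> accum{}, source{}, output{};
  for (int i = 0; i < kFragmentElements; ++i) { accum[i] = float(i); source[i] = 1.0f; }

  float alpha = 2.0f, beta = 0.5f;

  // Mirrors the unrolled loop over kOutputOpIterations in apply_output_operator().
  for (int i = 0; i < kOutputOpIterations; ++i) {
    for (int v = 0; v < kElementsPerAccess; ++v) {
      int idx = i * kElementsPerAccess + v;
      output[idx] = alpha * accum[idx] + beta * source[idx];
    }
  }

  printf("output[5] = %f\n", output[5]);   // 2.0 * 5 + 0.5 * 1 = 10.5
  return 0;
}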
13,546
C
33.383249
126
0.675624
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_simt.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using SIMT. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/warp/fragment_iterator_simt.h" #include "cutlass/epilogue/warp/tile_iterator_simt.h" #include "cutlass/epilogue/threadblock/default_thread_map_simt.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/threadblock/shared_load_iterator_pitch_liner.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/threadblock/epilogue_depthwise.h" #include "cutlass/layout/permute.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template < typename Shape_, typename WarpMmaSimt_, typename OutputOp_, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueSimt { using Shape = Shape_; using WarpMmaSimt = WarpMmaSimt_; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaSimt::LayoutC; using ElementAccumulator = typename WarpMmaSimt::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt< Shape, typename WarpMmaSimt::Shape, typename WarpMmaSimt::Policy, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, layout::RowMajor, typename WarpMmaSimt::Policy >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, ElementAccumulator, layout::RowMajor, typename WarpMmaSimt::Policy >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaSimt, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template < typename Shape_, typename WarpMmaSimt_, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueSimtStridedDgrad { using Shape = Shape_; using WarpMmaSimt = WarpMmaSimt_; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaSimt::LayoutC; using ElementAccumulator = typename WarpMmaSimt::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt< Shape, typename WarpMmaSimt::Shape, typename WarpMmaSimt::Policy, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad< OutputTileThreadMap, ElementOutput >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, layout::RowMajor, typename WarpMmaSimt::Policy >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, ElementAccumulator, layout::RowMajor, typename WarpMmaSimt::Policy >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaSimt, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template < int Rank, typename Shape_, typename WarpMmaSimt_, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueSimtAffineRankN { using Shape = Shape_; using WarpMmaSimt = WarpMmaSimt_; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; static const int kPartitionsK = Shape::kK / WarpMmaSimt::Shape::kK; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaSimt::LayoutC; using ElementAccumulator = typename WarpMmaSimt::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapSimt< Shape, typename WarpMmaSimt::Shape, typename WarpMmaSimt::Policy, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN< OutputTileThreadMap, ElementOutput, Rank >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, layout::RowMajor, typename WarpMmaSimt::Policy >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, ElementAccumulator, layout::RowMajor, typename WarpMmaSimt::Policy >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaSimt, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for SimtOps. 
template <typename Shape_, // ThreadBlock Shape typename WarpMmaSimt_, // mma_depthwise_simt typename OutputOp_, int ElementsPerAccess_, typename ThreadOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>, typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > struct DefaultDirectConvEpilogueSimt { using Shape = Shape_; using WarpMmaSimt = WarpMmaSimt_; using WarpShape = typename WarpMmaSimt::Shape; using OutputOp = OutputOp_; using ThreadOutputShape = ThreadOutputShape_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; static int const kElementsPerAccess = ElementsPerAccess_; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaSimt::LayoutC; using ElementAccumulator = typename WarpMmaSimt::ElementC; /// Number of threads total using WarpCount = gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN >; static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value; static int const kThreads = WarpCount::kCount * kWarpSize; // // Thread map // using OutputTileThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<ThreadBlockOutputShape::kC, ThreadBlockOutputShape::kNHW>, kThreads, kElementsPerAccess >; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorDirectConv< OutputTileThreadMap, ElementOutput, ThreadOutputShape, ThreadBlockOutputShape >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt< typename WarpMmaSimt::Shape, typename WarpMmaSimt::ThreadMma, layout::RowMajor, typename WarpMmaSimt::Policy >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorSimtDirect2dConv< typename WarpMmaSimt::Shape, ThreadOutputShape, ThreadBlockOutputShape, typename WarpMmaSimt::ThreadMma, ElementAccumulator, layout::RowMajor, typename WarpMmaSimt::Policy >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorPitchLiner< OutputTileThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::EpilogueDepthwise< Shape, ThreadOutputShape, ThreadBlockOutputShape, WarpMmaSimt, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
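// Editor's note: standalone sketch of the compile-time shape arithmetic performed by the traits
// above. The concrete tile sizes (128x128x8 threadblock, 32x64x8 warp) are hypothetical example
// values, not defaults taken from this header.
#include <cstdio>

int main() {
  constexpr int kTbM = 128, kTbN = 128, kTbK = 8;     // threadblock tile (M, N, K)
  constexpr int kWarpM = 32, kWarpN = 64, kWarpK = 8; // warp tile (M, N, K)

  constexpr int kPartitionsK = kTbK / kWarpK;                      // DefaultEpilogueSimt::kPartitionsK
  constexpr int kWarpCount   = (kTbM / kWarpM) * (kTbN / kWarpN);  // warps tiling the M x N output
  constexpr int kWarpSize    = 32;
  constexpr int kThreads     = kWarpCount * kWarpSize;             // DefaultDirectConvEpilogueSimt::kThreads

  printf("PartitionsK=%d warps=%d threads=%d\n", kPartitionsK, kWarpCount, kThreads);
  return 0;
}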
13,319
C
30.714286
100
0.696524
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_workspace.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs. This does not attempt to target any particular output layout. Instead, each threadblock streams out its accumulator elements using 128b store operations. This assumes all threadblocks have unique output tiles. The target data layout is: - threadblock indices mapped to linear offsets as (m, n, k), where m is fastest-changing - threadblock output space partitioned into warps; each warp's region is contiguous - per-thread accumulators partitioned into 128b accesses - output memory striped across the threads of a warp This enables very fast streaming of data, completely limited by the memory system. No predication or data exchange is performed, and each threadblock is assumed to have a full region of memory to write to. This epilogue establishes an upper bound for epilogue performance and is suitable for reductions across the GEMM K dimension which require a separate workspace. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, ///< shape of accumulator tile (concept: MatrixShape) int WarpCount, ///< number of warps typename FragmentC_ ///< warp-level GEMM operator (concept: gemm::warp::Mma) > class EpilogueWorkspace { public: using Shape = Shape_; using FragmentC = FragmentC_; using ElementC = typename FragmentC::value_type; static int const kWarpCount = WarpCount; /// Optimize for 128b accesses static int const kAccessSizeInBits = 128; /// Warp size from the perspective of memory operations static int const kWarpSize = 32; /// Vector length of accesses static int const kElementsPerAccess = kAccessSizeInBits / sizeof_bits<ElementC>::value; /// Number of stores per thread static int const kIterations = FragmentC::kElements / kElementsPerAccess; static_assert( !(FragmentC::kElements % kElementsPerAccess), "The number of accumulators must be divisible by the access size."); /// Total number of vectorized accesses in warp (in units of vector) static int const kWarpAccesses = kIterations * kWarpSize; /// Total number of vectorized accesses in threadblock tile (in units of vector) static int const kThreadblockAccesses = kWarpAccesses * kWarpCount; /// Parameters structure struct Params { /// Pointer to C matrix ElementC *ptr_C; /// Stride between tiles along the GEMM N dimension (in units of vectors) int stride_n; /// Stride between tiles along the GEMM K dimension (in units of vectors) int stride_k; // // Methods // CUTLASS_HOST_DEVICE Params( ElementC *ptr_C, ///< Pointer to C matrix int stride_n_, ///< Stride between tiles along the GEMM N dimension (in units of ElementC) int stride_k_ ///< Stride between tiles along the GEMM K dimension (in units of ElementC) ): ptr_C(ptr_C), stride_n(stride_n_ / kElementsPerAccess), stride_k(stride_k_ / kElementsPerAccess) { } }; /// Shared storage allocation needed by the epilogue struct SharedStorage { // Intentionally empty }; private: struct alignas((kAccessSizeInBits / 8)) AccessType { Array<ElementC, kElementsPerAccess> storage; }; /// Constant reference to parameters object AccessType *pointer_; /// Stride between tiles along the n dimension (in vectors) int stride_n_; /// Stride between tiles along the k dimension (in vectors) int stride_k_; public: /// Constructor CUTLASS_DEVICE EpilogueWorkspace( Params const &params, ///< Host-constructable params object SharedStorage &, ///< Shared storage object int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): pointer_(reinterpret_cast<AccessType *>(params.ptr_C)), stride_n_(params.stride_n), stride_k_(params.stride_k) { // Add per-thread offset pointer_ += lane_idx + warp_idx * kWarpAccesses; } /// Streams the result to global memory CUTLASS_DEVICE void operator()( cutlass::gemm::GemmCoord problem_size, ///< Problem size of GEMM (units of ElementC) cutlass::gemm::GemmCoord tb_tile_coord, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) FragmentC const &accum) { ///< Accumulator tile // Compute offset for entire threadblock (note, per-thread offset has been folded in already) AccessType *pointer = pointer_ + tb_tile_coord.m() * kThreadblockAccesses + tb_tile_coord.n() * stride_n_ + tb_tile_coord.k() 
* stride_k_; // Cast to vectorized view of accumulator fragments AccessType const * src_pointer = reinterpret_cast<AccessType const *>(&accum); // Write out accumulators at full speed CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kIterations; ++i) { pointer[i * kWarpSize] = src_pointer[i]; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
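// Editor's note: standalone sketch of the access-size arithmetic in EpilogueWorkspace. The
// element type (32-bit float) and fragment size (64 accumulators per thread) are hypothetical
// example values used only to make the constants concrete.
#include <cstdio>

int main() {
  constexpr int kAccessSizeInBits  = 128;                                     // 128b stores
  constexpr int kElementBits       = 32;                                      // float accumulator
  constexpr int kElementsPerAccess = kAccessSizeInBits / kElementBits;        // 4
  constexpr int kFragmentElements  = 64;                                      // per-thread accumulators
  constexpr int kIterations        = kFragmentElements / kElementsPerAccess;  // 16 stores per thread
  constexpr int kWarpSize          = 32;
  constexpr int kWarpAccesses      = kIterations * kWarpSize;                 // vectors per warp tile

  printf("elements/access=%d iterations=%d warp accesses=%d\n",
         kElementsPerAccess, kIterations, kWarpAccesses);
  return 0;
}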
7,308
C
35.914141
121
0.655993
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// template <typename Element_> class DirectStoreEpilogueIterator { public: using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = 1; /// Uses a non-template class struct Params : PredicatedTileIteratorParams { using Base = PredicatedTileIteratorParams; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout) { stride = layout.stride(0) * sizeof(Element); } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; public: // // Data members // Element *pointer; // pointer to the output matrix LongIndex stride; // stride in elements between rows TensorCoord extent; // extent of output matrix int thread_idx; // thread index TensorCoord threadblock_offset; public: /// Constructor CUTLASS_DEVICE DirectStoreEpilogueIterator( PredicatedTileIteratorParams const & params, Element *pointer_, TensorCoord extent_, int thread_idx_, TensorCoord threadblock_offset_ = TensorCoord(), int const * indices = nullptr ): pointer(pointer_), stride(params.stride / sizeof(Element)), extent(extent_), thread_idx(thread_idx_), threadblock_offset(threadblock_offset_) { } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
4,678
C
31.720279
100
0.645361
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_params.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/conv2d_problem_size.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// struct OutputTileShapeDesc { int column; int row; int group; int cluster; int tile; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE OutputTileShapeDesc(): column(0), row(0), group(0), cluster(0), tile(0) { } /// Ctor CUTLASS_HOST_DEVICE OutputTileShapeDesc( int column_, int row_, int group_, int cluster_, int tile_ ): column(column_), row(row_), group(group_), cluster(cluster_), tile(tile_) { } /// Total number of points in the 5D space CUTLASS_HOST_DEVICE int count() const { return column * row * group * cluster * tile; } #if 0 CUTLASS_HOST_DEVICE void print() const { printf("{%d, %d, %d, %d, %d}", column, row, group, cluster, tile); } #endif }; /// Helper template to construct an OutputTileShapeDesc from a OutputTileShape template. 
template <typename Shape> CUTLASS_HOST_DEVICE OutputTileShapeDesc make_OutputTileShapeDesc() { return OutputTileShapeDesc( Shape::kColumn, Shape::kRow, Shape::kGroup, Shape::kCluster, Shape::kTile ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Thread map description struct OutputTileThreadMapDesc { int threads; int elements_per_access; OutputTileShapeDesc shape; OutputTileShapeDesc iterations; OutputTileShapeDesc delta; OutputTileShapeDesc count; // // Methods // CUTLASS_HOST_DEVICE OutputTileThreadMapDesc() { } CUTLASS_HOST_DEVICE OutputTileThreadMapDesc( int threads_, int elements_per_access_, OutputTileShapeDesc shape_, OutputTileShapeDesc iterations_, OutputTileShapeDesc delta_, OutputTileShapeDesc count_ ): threads(threads_), elements_per_access(elements_per_access_), shape(shape_), iterations(iterations_), delta(delta_), count(count_) { } }; /// Helper template to construct an OutputTileShapeDesc from a OutputTileThreadMap template. template <typename ThreadMap> CUTLASS_HOST_DEVICE OutputTileThreadMapDesc make_OutputTileThreadMapDesc() { return OutputTileThreadMapDesc( ThreadMap::kThreads, ThreadMap::kElementsPerAccess, make_OutputTileShapeDesc<typename ThreadMap::Shape>(), make_OutputTileShapeDesc<typename ThreadMap::Iterations>(), make_OutputTileShapeDesc<typename ThreadMap::Delta>(), make_OutputTileShapeDesc<typename ThreadMap::Count>() ); } /////////////////////////////////////////////////////////////////////////////// // // Parameters struct for PredicatedTileIterator // struct PredicatedTileIteratorParams { using Index = int32_t; using LongIndex = int64_t; // // Data members // LongIndex stride; ///< stride in bytes between rows LongIndex increment_row; ///< increment quantity (in bytes) to advance when moving between rows LongIndex increment_group; ///< increment quantity (in bytes) to advance when moving to the next group LongIndex increment_cluster; ///< increment quantity (in bytes) to advance when moving to the next cluster LongIndex advance_row; ///< amount to add to move to the next 'row' position LongIndex advance_group; ///< amount to add to move to the next 'group' position LongIndex advance_cluster; ///< amount to add to move to the next 'cluster' position LongIndex advance_tile; ///< amount to add to move to the next 'tile' // // Methods // CUTLASS_HOST_DEVICE Status initialize(LongIndex stride_, OutputTileThreadMapDesc thread_map) { stride = stride_; increment_row = stride * thread_map.delta.row; increment_group = stride * thread_map.delta.group - stride * thread_map.delta.row * (thread_map.iterations.row - 1); increment_cluster = stride * thread_map.delta.cluster - stride * thread_map.delta.group * (thread_map.iterations.group - 1) - stride * thread_map.delta.row * (thread_map.iterations.row - 1); advance_row = stride * thread_map.shape.row; advance_group = stride * (thread_map.shape.group - 1) * thread_map.shape.row * thread_map.count.row; advance_cluster = stride * thread_map.count.group * thread_map.shape.group * thread_map.count.row * thread_map.shape.row; advance_tile = stride * thread_map.shape.group * thread_map.shape.row * thread_map.shape.cluster * thread_map.shape.tile; return Status::kSuccess; } CUTLASS_HOST_DEVICE Status initialize(Index stride_, OutputTileThreadMapDesc thread_map) { return initialize(LongIndex(stride_), thread_map); } CUTLASS_HOST_DEVICE PredicatedTileIteratorParams() { initialize(LongIndex(0), OutputTileThreadMapDesc()); } CUTLASS_HOST_DEVICE 
PredicatedTileIteratorParams(Index stride, OutputTileThreadMapDesc thread_map) { initialize(stride, thread_map); } CUTLASS_HOST_DEVICE PredicatedTileIteratorParams(LongIndex stride, OutputTileThreadMapDesc thread_map) { initialize(stride, thread_map); } }; /////////////////////////////////////////////////////////////////////////////// // // Parameters struct for PredicatedTileIteratorDirect2dConv // struct PredicatedTileIteratorDirect2dConvParams{ using Index = int32_t; using LongIndex = int64_t; // // Data members // FastDivmod pq_divmod; FastDivmod q_divmod; LongIndex stride; LongIndex stride_n; LongIndex stride_p; int N; int P; int Q; // // Methods // CUTLASS_HOST_DEVICE Status initialize(LongIndex stride_, cutlass::conv::Conv2dProblemSize const &problem_size, MatrixCoord threadblock_output_shape) { stride = stride_; // The stride per row of output tensor (bytes) stride_n = problem_size.P * problem_size.Q; stride_p = problem_size.Q ; N = problem_size.N; P = problem_size.P; Q = problem_size.Q; // Fastdivmod for output O, P, Q if(threadblock_output_shape.row() != 0 && threadblock_output_shape.column() !=0 ){ int tiles_p = (problem_size.P + (threadblock_output_shape.row() - 1)) / (threadblock_output_shape.row()); int tiles_q = (problem_size.Q + (threadblock_output_shape.column() - 1)) / (threadblock_output_shape.column()); pq_divmod = FastDivmod(tiles_p * tiles_q); q_divmod = FastDivmod(tiles_q); } return Status::kSuccess; } CUTLASS_HOST_DEVICE Status initialize( Index stride_, cutlass::conv::Conv2dProblemSize const &problem_size = cutlass::conv::Conv2dProblemSize(), MatrixCoord threadblock_output_shape = MatrixCoord()) { return initialize(LongIndex(stride_), problem_size, threadblock_output_shape); } CUTLASS_HOST_DEVICE PredicatedTileIteratorDirect2dConvParams() { initialize(LongIndex(0)); } CUTLASS_HOST_DEVICE PredicatedTileIteratorDirect2dConvParams(Index stride, cutlass::conv::Conv2dProblemSize const &problem_size, MatrixCoord threadblock_output_shape) { initialize(stride, problem_size, threadblock_output_shape); } CUTLASS_HOST_DEVICE PredicatedTileIteratorDirect2dConvParams(LongIndex stride, cutlass::conv::Conv2dProblemSize const &problem_size, MatrixCoord threadblock_output_shape) { initialize(stride, problem_size, threadblock_output_shape); } }; /////////////////////////////////////////////////////////////////////////////// // InterleavedPredicatedTileIterator /////////////////////////////////////////////////////////////////////////////// /// Predicated tile access iterator descriptor object containing template dependent state struct InterleavedPredicatedTileIteratorDesc { int element_size_bits; int elements_per_access; int threadmap_warp_size; layout::PitchLinearCoord threadmap_iterations; layout::PitchLinearCoord threadmap_delta; // // Methods // CUTLASS_HOST_DEVICE InterleavedPredicatedTileIteratorDesc() { } CUTLASS_HOST_DEVICE InterleavedPredicatedTileIteratorDesc( int element_size_bits_, int elements_per_access_, int threadmap_warp_size_, layout::PitchLinearCoord threadmap_iterations_, layout::PitchLinearCoord threadmap_delta_ ): element_size_bits(element_size_bits_), elements_per_access(elements_per_access_), threadmap_warp_size(threadmap_warp_size_), threadmap_iterations(threadmap_iterations_), threadmap_delta(threadmap_delta_) { } }; // // Parameters struct InterleavedPredicatedTileIterator // struct InterleavedPredicatedTileIteratorParams { using Index = int32_t; using LongIndex = int64_t; // // Data members // LongIndex stride; ///< stride in bytes between rows LongIndex 
advance_row; ///< amount to add to move to the next 'row' position LongIndex advance_column; ///< amount to add to move to the next 'column' position // // Methods // CUTLASS_HOST_DEVICE Status initialize(LongIndex stride_, InterleavedPredicatedTileIteratorDesc desc) { stride = stride_; advance_row = desc.threadmap_delta.contiguous() * desc.element_size_bits / 8; advance_column = stride_ - desc.threadmap_iterations.contiguous() * desc.elements_per_access * desc.element_size_bits * desc.threadmap_warp_size / 8; return Status::kSuccess; } CUTLASS_HOST_DEVICE InterleavedPredicatedTileIteratorParams() { initialize(LongIndex(0), InterleavedPredicatedTileIteratorDesc()); } CUTLASS_HOST_DEVICE InterleavedPredicatedTileIteratorParams(Index stride, InterleavedPredicatedTileIteratorDesc desc) { initialize(stride, desc); } CUTLASS_HOST_DEVICE InterleavedPredicatedTileIteratorParams(LongIndex stride, InterleavedPredicatedTileIteratorDesc desc) { initialize(stride, desc); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper template to construct an OutputTileShapeDesc from a OutputTileThreadMap template. template <typename Element, typename ThreadMap> CUTLASS_HOST_DEVICE InterleavedPredicatedTileIteratorDesc make_InterleavedPredicatedTileIteratorDesc() { return InterleavedPredicatedTileIteratorDesc( sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess, ThreadMap::kWarpSize, {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided} ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper template to construct an MakePredicatedTileIteratorDesc from a template // dependent state template <typename Element, typename Layout, typename ThreadMap> struct MakePredicatedTileIteratorDesc; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for layout::RowMajor output data. template <typename Element, typename ThreadMap> struct MakePredicatedTileIteratorDesc < Element, layout::RowMajor, ThreadMap> { CUTLASS_HOST_DEVICE OutputTileThreadMapDesc operator()() { return make_OutputTileThreadMapDesc<ThreadMap>(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for layout::ColumnMajorInterleaved<InterleavedN> output data. template <typename Element, typename ThreadMap, int InterleavedN> struct MakePredicatedTileIteratorDesc < Element, layout::ColumnMajorInterleaved<InterleavedN>, ThreadMap> { CUTLASS_HOST_DEVICE InterleavedPredicatedTileIteratorDesc operator()() { return make_InterleavedPredicatedTileIteratorDesc<Element, ThreadMap>(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
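// Editor's note: standalone numeric sketch of PredicatedTileIteratorParams::initialize(). The
// thread-map values (delta.row = 8, iterations.row = 2, delta.group = 32, iterations.group = 4,
// delta.cluster = 128) and the 1024-byte row stride are hypothetical, chosen only to show how
// the byte increments are composed from the row stride.
#include <cstdio>

int main() {
  long stride = 1024;                       // bytes between rows
  int delta_row = 8,     iterations_row = 2;
  int delta_group = 32,  iterations_group = 4;
  int delta_cluster = 128;

  long increment_row     = stride * delta_row;
  long increment_group   = stride * delta_group
                         - stride * delta_row * (iterations_row - 1);
  long increment_cluster = stride * delta_cluster
                         - stride * delta_group * (iterations_group - 1)
                         - stride * delta_row * (iterations_row - 1);

  printf("increment_row=%ld increment_group=%ld increment_cluster=%ld\n",
         increment_row, increment_group, increment_cluster);
  return 0;
}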
14,496
C
29.455882
112
0.638176
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_wmma_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "predicated_tile_iterator.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/pitch_linear.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for Wmma TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, typename InstructionShape_, int PartitionsK, typename Element_, int ElementsPerAccess > struct DefaultThreadMapWmmaTensorOp { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; static int const kPartitionsK = PartitionsK; using Element = Element_; static int const kElementsPerAccess = ElementsPerAccess; // // Definitions // struct Detail { /// Wmma Tensor Operations fundamentally perform operations on InstructionShape::kM rows static int const kTensorOpRows = InstructionShape::kM; static int const kWarpSize = 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap < OutputTileShape<ThreadblockShape::kN, Detail::kTensorOpRows, Detail::WarpCount::kM, 1, 1>, OutputTileShape<1, WarpShape::kM / Detail::kTensorOpRows, 1, 1, WarpShape::kM / Detail::kTensorOpRows>, Detail::kThreads, kElementsPerAccess, sizeof_bits<Element>::value >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
4,098
C
34.95614
107
0.647877
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator_pitch_liner.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. This assumes the shared memory tile is in a permuted layout which avoids bank conflicts on loading. When the fragment is loaded into registers, it matches the row-major thread map assumed by the predicated tile iterator writing to global memory. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from shared memory in epilogue. 
/// /// Satisfies: ReadableTileIterator /// template <typename ThreadMap_, ///< Thread map (concept: PitchLinearThreadMap) typename Element_, ///< Element data type int MaxAlignment = ThreadMap_::kElementsPerAccess *sizeof_bits<Element_>::value / 8> class SharedLoadIteratorPitchLiner { public: using ThreadMap = ThreadMap_; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kMinAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8; static int const kAlignment = (MaxAlignment < kMinAlignment ? MaxAlignment : kMinAlignment); static int const kThreads = ThreadMap::kThreads; /// Fragment object using Fragment = Array<Element, ThreadMap::Iterations::kCount * kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, kElementsPerAccess, kAlignment>; /// Vector type used for SMEM loads using LoadType = AlignedArray<Element, const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess), const_min(16, kAlignment)>; static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements; private: // // Data members // /// Byte-level pointer uint8_t *byte_pointer_; /// Stride along adjacent rows int stride_; /// Base address offset Index base_smem_address_; public: // // Methods // /// Constructor CUTLASS_DEVICE SharedLoadIteratorPitchLiner(TensorRef ref, int thread_idx) : byte_pointer_(reinterpret_cast<uint8_t *>(ref.data())), stride_((ref.stride(0) * sizeof_bits<Element>::value) / 8), base_smem_address_(0) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); // Initialize pointer // thread_offset.row() is contiguous dim // thread_offset.column() is stride dim byte_pointer_ += thread_offset.row() * sizeof(AccessType) / kElementsPerAccess+ thread_offset.column() * stride_ ; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_DEVICE void add_tile_offset(TensorCoord const &offset) { byte_pointer_ += offset.row() * ThreadMap::StorageShape::kContiguous * sizeof(AccessType) / kElementsPerAccess + offset.column() * ThreadMap::StorageShape::kStrided * stride_; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { uint8_t const *byte_pointer = byte_pointer_ + s * ThreadMap::Delta::kStrided * stride_ + c * ThreadMap::Delta::kContiguous * ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value / 8 + pointer_offset * sizeof_bits<Element>::value / 8 + base_smem_address_; int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c; LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag); LoadType const *memory_pointer = reinterpret_cast<LoadType const *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kLoadsPerAccess; ++v) { frag_ptr[frag_base_idx * kLoadsPerAccess + v] = memory_pointer[v]; } } } } /// Sets the base address of the shared memory tile CUTLASS_DEVICE void set_smem_base_address(Index address) { base_smem_address_ = address; } ///
Loads a fragment CUTLASS_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
7,394
C
36.923077
103
0.653773
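The shared-memory load iterator above turns a (strided, contiguous) thread-map coordinate into a byte offset. Below is a minimal host-side sketch of that same address arithmetic; the delta, vector-width, and element-size constants are illustrative stand-ins for the ThreadMap/Element parameters, not values taken from a real CUTLASS configuration.

// Sketch of the byte-offset arithmetic used by
// SharedLoadIteratorPitchLiner::load_with_pointer_offset().
#include <cstdint>
#include <cstdio>

namespace sketch {

constexpr int kElementsPerAccess = 4;   // hypothetical vector width
constexpr int kElementBits       = 32;  // e.g. float accumulators
constexpr int kDeltaStrided      = 8;   // hypothetical ThreadMap::Delta::kStrided
constexpr int kDeltaContiguous   = 32;  // hypothetical ThreadMap::Delta::kContiguous

// Byte offset of the (s, c) access for a thread whose initial offset has
// already been folded into base_bytes, mirroring the iterator's pointer math.
inline std::int64_t access_offset_bytes(std::int64_t base_bytes,
                                        std::int64_t stride_bytes,
                                        int s, int c,
                                        std::int64_t pointer_offset_elements = 0) {
  return base_bytes
       + s * kDeltaStrided * stride_bytes
       + c * kDeltaContiguous * kElementsPerAccess * kElementBits / 8
       + pointer_offset_elements * kElementBits / 8;
}

} // namespace sketch

int main() {
  // Walk the first few (strided, contiguous) iterations for one thread.
  std::int64_t stride_bytes = 256;  // bytes between adjacent strided rows in SMEM
  for (int s = 0; s < 2; ++s) {
    for (int c = 0; c < 2; ++c) {
      std::printf("s=%d c=%d -> byte offset %lld\n", s, c,
                  (long long)sketch::access_offset_bytes(0, stride_bytes, s, c));
    }
  }
  return 0;
}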
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_direct_store.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs and convolution using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_ ///< Output operator > class EpilogueDirectStore { public: using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; using WarpShape = typename WarpMmaOperator_::Shape; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using OutputOp = OutputOp_; using Padding = MatrixShape<0, 0>; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, kPartitionsK >; /// Use this to control the granularity of one epilogue 'iteration' static int const kFragmentsPerIteration = 1; static int constexpr kSmemTiles = 1; static int constexpr kSmemPointerOffset = 0; /// Shared storage allocation needed by the epilogue struct SharedStorage { } ; private: // Assume accumulator tile is multipile interleaved 32x32 tile. 
static int const kElementsPerPartial = 4; using EleShapePerPatial = typename platform::conditional< platform::is_same<ElementAccumulator, float>::value, MatrixShape<2, 2>, MatrixShape<1, 4> >::type; static int const kElementsPerMma = 8; static int const kAccumulatorPatials = 2; using QuadShapePerPatialMma = MatrixShape<4, 4>; static_assert(OutputOp::kCount >= 2, "The direct store epilogue for Tensor Ops requires the output functor have kCount >= 2."); private: LongIndex warp_offset; int thread_idx; int warp_idx; int lane_idx; int warp_m, warp_n; // warp coordinates within a cta int tid_m, tid_n; // thread coordinates within a warp public: /// Constructor CUTLASS_DEVICE EpilogueDirectStore( SharedStorage &shared_storage, ///< Shared storage object int thread_idx_, ///< ID of a thread within the threadblock int warp_idx_, ///< ID of warp within threadblock int lane_idx_ ///< Id of thread within warp ): thread_idx(thread_idx_), warp_idx(warp_idx_), lane_idx(lane_idx_) { // warp offsetting calculations warp_offset = warp_idx * WarpShape::kM * WarpShape::kN; int warp_id_mn = warp_idx % (WarpCount::kM * WarpShape::kN); warp_m = warp_id_mn % WarpCount::kM; warp_n = warp_id_mn / WarpCount::kM; MatrixCoord warp_offset_coord(warp_m*WarpShape::kM, warp_n*WarpShape::kN); // thread offsetting calculations int quad = (lane_idx >> 2); int lane_in_quad = (lane_idx & 3); // this seems to be te correct layout tid_m = quad; tid_n = 2 * lane_in_quad; } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) if (!output_op.is_source_needed()) { compute_source_not_needed_(output_op, destination_iterator, accumulators); } else { compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator); } } private: /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) const int kAccumBlockN = 2; const int kThreadsM = 8; const int kThreadsN = 4; const int kBlockM = WarpShape::kM / kThreadsM; /// Array type used to output using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>; /// Array type passed to the output operator - unused elements are optimized away using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>; /// Array type used by output functor using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>; /// Array type used by output functor using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>; AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators); CUTLASS_PRAGMA_UNROLL for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) { int accum_m = kThreadsM * accum_m_idx; int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m; int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n; ElementOutput *output_ptr = 
destination_iterator.pointer + mL * destination_iterator.stride; ElementOutput *source_ptr = source_iterator.pointer + mL * source_iterator.stride; int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN; CUTLASS_PRAGMA_UNROLL for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) { int accum_idx = accum_m_idx + kBlockM * accum_n_idx; int accum_n = kThreadsM * accum_n_idx; // mL and nL are logical coordinate in 2D mapping of epilogue's 4D output int nL = nL_base + accum_n; bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column()); AccumulatorFragmentType accum_fragment; reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx]; OutputFragmentType output_fragment; if(guard) { reinterpret_cast<OutputAccessType &>(output_fragment) = *reinterpret_cast<OutputAccessType const *>(source_ptr + nL); } // Perform output operator output_fragment = output_op(accum_fragment, output_fragment); if(guard) { // Store *reinterpret_cast<OutputAccessType *>(output_ptr + nL) = reinterpret_cast<OutputAccessType const &>(output_fragment); } } } } /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) const int kAccumBlockN = 2; const int kThreadsM = 8; const int kThreadsN = 4; const int kBlockM = WarpShape::kM / kThreadsM; /// Array type used to output using OutputAccessType = AlignedArray<ElementOutput, kAccumBlockN>; /// Array type passed to the output operator - unused elements are optimized away using OutputFragmentType = Array<ElementOutput, OutputOp::kCount>; /// Array type used by output functor using AccumulatorAccessType = Array<ElementAccumulator, kAccumBlockN>; /// Array type used by output functor using AccumulatorFragmentType = Array<ElementAccumulator, OutputOp::kCount>; AccumulatorAccessType const *accumulator_pair = reinterpret_cast<AccumulatorAccessType const *>(&accumulators); CUTLASS_PRAGMA_UNROLL for (int accum_m_idx = 0; accum_m_idx < WarpShape::kM / kThreadsM; accum_m_idx++) { int accum_m = kThreadsM * accum_m_idx; int mL = destination_iterator.threadblock_offset.row() + WarpShape::kM * warp_m + tid_m + accum_m; int nL_base = destination_iterator.threadblock_offset.column() + WarpShape::kN * warp_n + tid_n; ElementOutput *output_ptr = destination_iterator.pointer + mL * destination_iterator.stride; int const kIterationsN = WarpShape::kN / kThreadsN / kAccumBlockN; CUTLASS_PRAGMA_UNROLL for (int accum_n_idx = 0; accum_n_idx < kIterationsN; accum_n_idx++) { int accum_idx = accum_m_idx + kBlockM * accum_n_idx; int accum_n = kThreadsM * accum_n_idx; // mL and nL are logical coordinate in 2D mapping of epilogue's 4D output int nL = nL_base + accum_n; bool guard = (mL < destination_iterator.extent.row()) && (nL < destination_iterator.extent.column()); AccumulatorFragmentType accum_fragment; reinterpret_cast<AccumulatorAccessType &>(accum_fragment) = accumulator_pair[accum_idx]; OutputFragmentType output_fragment; // Perform output operator output_fragment = output_op(accum_fragment); if(guard) { // Store *reinterpret_cast<OutputAccessType *>(output_ptr + nL) = reinterpret_cast<OutputAccessType const &>(output_fragment); } } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // 
namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
13,933
C
39.04023
127
0.663174
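EpilogueDirectStore assigns each lane of a warp a starting coordinate with tid_m = quad and tid_n = 2 * lane_in_quad, where quad is the upper three lane bits and lane_in_quad the lower two. The standalone sketch below reproduces just that shift/mask mapping so the per-lane layout can be inspected; the warp and tile shapes themselves are not modeled.

// Sketch of the per-lane coordinate mapping in the EpilogueDirectStore
// constructor: each quad of a warp owns one row, and the two low lane bits
// select a starting column (two accumulators are handled per step).
#include <cstdio>

int main() {
  for (int lane = 0; lane < 32; ++lane) {
    int quad         = lane >> 2;   // which group of four lanes
    int lane_in_quad = lane & 3;    // position within the quad
    int tid_m = quad;               // row owned by this lane
    int tid_n = 2 * lane_in_quad;   // starting column within the warp tile
    std::printf("lane %2d -> tid_m=%d tid_n=%d\n", lane, tid_m, tid_n);
  }
  return 0;
}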
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/output_tile_thread_map.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Metaprogram for determining the mapping of output elements to threads for epilogue tiles. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tuple defining point in output tile template < int Column, int Row, int Group, int Cluster, int Tile > struct OutputTileShape { static int const kColumn = Column; static int const kRow = Row; static int const kGroup = Group; static int const kCluster = Cluster; static int const kTile = Tile; static int const kCount = kColumn * kRow * kGroup * kCluster * kTile; }; //////////////////////////////////////////////////////////////////////////////// template <typename Iterations, typename Delta> struct OutputTileThreadMapHelpers { /// Determines the iteration index of a vector access according to the thread map CUTLASS_HOST_DEVICE static void iteration_index( int &column_idx, int &row_idx, int &group_idx, int &cluster_idx, int &tile_idx, int iter_idx) { column_idx = iter_idx % Iterations::kColumn; int residual = iter_idx / Iterations::kColumn; row_idx = residual % Iterations::kRow; residual = residual / Iterations::kRow; group_idx = residual % Iterations::kGroup; residual = residual / Iterations::kGroup; cluster_idx = residual % Iterations::kCluster; tile_idx = residual / Iterations::kCluster; } /// Computes the offset of a given vector access CUTLASS_HOST_DEVICE static MatrixCoord iteration_offset(int iter_idx) { int column_idx; int row_idx; int group_idx; int cluster_idx; int tile_idx; iteration_index(column_idx, row_idx, group_idx, cluster_idx, tile_idx, iter_idx); return MatrixCoord( row_idx * Delta::kRow + group_idx * Delta::kGroup + cluster_idx * Delta::kCluster + tile_idx * Delta::kTile, column_idx * Delta::kColumn); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ThreadMap_, typename Shape_, typename Iterations_, typename Delta_, typename Count_ > struct OutputTileThreadMap : public OutputTileThreadMapHelpers<Iterations_, Delta_> { /// Conventional thread map (concept: ThreadMap) using ThreadMap = ThreadMap_; /// Number of threads participating in the operation static int const kThreads = ThreadMap::kThreads; /// Number of scalar elements per access static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; /// Shape of the tile using Shape = Shape_; /// Iterations performed by each thread using Iterations = Iterations_; /// Delta between accesses using Delta = Delta_; /// Number of iterator iterations using Count = Count_; /// Initial offset function CUTLASS_HOST_DEVICE static MatrixCoord initial_offset(int thread_idx) { using Index = typename layout::PitchLinearCoord::Index; layout::PitchLinearCoord coord = ThreadMap::initial_offset(thread_idx); Index cluster = coord.strided() / (Shape::kGroup * Shape::kRow); Index cluster_residual = coord.strided() % (Shape::kGroup * Shape::kRow); Index group = cluster_residual / (Shape::kRow); Index row = cluster_residual % (Shape::kRow); return MatrixCoord{ row + group * Shape::kRow * Count::kRow + cluster * Shape::kGroup * Count::kGroup * Shape::kRow * Count::kRow, coord.contiguous() }; } }; //////////////////////////////////////////////////////////////////////////////// namespace detail { /// 
RowArrangement determines how one or more warps cover a region of consecutive rows. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize, bool Is2dTile > struct RowArrangement; /// RowArrangement in which each warp's access is a 1D tiled arrangement. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize > struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, false> { static int const kWarpSize = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; static int const kIterationsRow = 1; static int const kDeltaRow = 1; static int const kIterationsColumn = Shape::kColumn / kElementsPerAccess / kWarpSize; static int const kDeltaColumn = kWarpSize * kElementsPerAccess; static int const kAccessWidth = kWarpSize; static int const kAccessRows = 1; static int const kWarpPartitionsRow = 1; static int const kWarpPartitionsColumn = WarpsRemaining; }; /// RowArrangement in which each warp's access is a 2D tiled arrangement. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize > struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, true> { static int const kMemoryAccessSize = 256; // Preferred access size static int const kWarpSize = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; struct Detail { static int const kShapeRow = Shape::kRow / WarpsRemaining; static int const kShapeWidth = Shape::kColumn / kElementsPerAccess; static int const kTargetMemoryAccessWidth = kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8); static int const kTargetAccessRows = kWarpSize / kTargetMemoryAccessWidth; }; static int const kAccessWidth = (Detail::kTargetAccessRows > Detail::kShapeRow ? kWarpSize / Detail::kShapeRow : const_min( Detail::kShapeWidth, const_min(kWarpSize, kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8)) )); static int const kAccessRows = (Detail::kTargetAccessRows > Detail::kShapeRow ? 
Detail::kShapeRow : const_min(Shape::kRow, kWarpSize / kAccessWidth)); static int const kIterationsRow = Detail::kShapeRow / kAccessRows; static int const kDeltaRow = kAccessRows; static int const kIterationsColumn = Detail::kShapeWidth / kAccessWidth; static int const kDeltaColumn = kAccessWidth * kElementsPerAccess; static_assert( kAccessWidth * kElementsPerAccess <= Shape::kColumn, "Accessing too many elements per access"); static_assert( kIterationsColumn > 0, "Iteration Count Column must be > 0" ); static_assert( kIterationsRow > 0, "Iteration Count Row must be > 0" ); static int const kWarpPartitionsRow = 1; static int const kWarpPartitionsColumn = 1; }; } //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 4D space across warps to achieve several performance /// objectives: /// /// - coalesced memory accesses in units of 128 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template < typename Shape_, typename Count_, int Threads, int ElementsPerAccess, int ElementSize > struct OutputTileOptimalThreadMap { using Shape = Shape_; using Count = Count_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail { // Clusters static int const kIterationsCluster = ((Shape::kCluster > kWarpCount) ? Shape::kCluster / kWarpCount : 1); static int const kDeltaCluster = ((Shape::kCluster > kWarpCount) ? Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup * Shape::kCluster / kIterationsCluster : 1); static int const kCompactedDeltaCluster = ((Shape::kCluster > kWarpCount) ? Shape::kRow * Shape::kGroup * Shape::kCluster / kIterationsCluster : 1); static int const kWarpPartitionsCluster = ((Shape::kCluster > kWarpCount) ? kWarpCount : kWarpCount / Shape::kCluster); static int const kWarpsRemainingForGroups = ((Shape::kCluster > kWarpCount) ? 1 : kWarpCount / Shape::kCluster); // Groups static int const kIterationsGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kGroup / kWarpsRemainingForGroups : 1); static int const kDeltaGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kRow * Count::kRow * Shape::kGroup / kIterationsGroup : 1); static int const kCompactedDeltaGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kRow * Shape::kGroup / kIterationsGroup : 1); static int const kWarpPartitionsGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? 1 : kWarpsRemainingForGroups / Shape::kGroup); static int const kWarpsRemainingForRows = ((Shape::kGroup > kWarpsRemainingForGroups) ? 
1 : kWarpsRemainingForGroups / Shape::kGroup); // Rows using RowArrangement = detail::RowArrangement< Shape, kWarpsRemainingForRows, kElementsPerAccess, kElementSize, (Shape::kRow > kWarpsRemainingForRows) >; // Warp partitions using WarpPartitions = OutputTileShape< RowArrangement::kWarpPartitionsColumn, RowArrangement::kWarpPartitionsRow, kWarpPartitionsGroup, kWarpPartitionsCluster, 1>; static int const kAccessWidth = RowArrangement::kAccessWidth; static int const kAccessRows = RowArrangement::kAccessRows; }; // // Output // using Iterations = OutputTileShape< Detail::RowArrangement::kIterationsColumn, Detail::RowArrangement::kIterationsRow, Detail::kIterationsGroup, Detail::kIterationsCluster, 1>; using Delta = OutputTileShape< Detail::RowArrangement::kDeltaColumn, Detail::RowArrangement::kDeltaRow, Detail::kDeltaGroup, Detail::kDeltaCluster, 1>; /// Initial offset function CUTLASS_DEVICE static MatrixCoord initial_offset(int thread_idx) { int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0); int lane_idx = thread_idx % kWarpSize; // Compute warp location int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster; int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster; int group_idx = residual_cluster / Detail::WarpPartitions::kGroup; int residual_group = residual_cluster % Detail::WarpPartitions::kGroup; int row_idx = residual_group / Detail::WarpPartitions::kRow; int col_idx = residual_group % Detail::WarpPartitions::kRow; // Compute per-lane offset int lane_row_offset = lane_idx / Detail::kAccessWidth; int lane_col_offset = lane_idx % Detail::kAccessWidth; // Compute coordinate in output space int cluster_offset = cluster_idx * Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup; int group_offset = group_idx * Shape::kRow * Count::kRow; int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows; int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess; return MatrixCoord( cluster_offset + group_offset + row_offset + lane_row_offset, column_offset + lane_col_offset * kElementsPerAccess ); } /// Computes the offset of a given vector access CUTLASS_HOST_DEVICE static MatrixCoord iteration_offset(int iter_idx) { return OutputTileThreadMapHelpers<Iterations, Delta>::iteration_offset(iter_idx); } /// Compacted thread map in which the 4D region is contiguous struct CompactedThreadMap { using Shape = Shape_; using TileShape = MatrixShape< Shape::kTile * Shape::kCluster * Shape::kGroup * Shape::kRow, Shape::kColumn >; using Iterations = OutputTileShape< Detail::RowArrangement::kIterationsColumn, Detail::RowArrangement::kIterationsRow, Detail::kIterationsGroup, Detail::kIterationsCluster, 1>; using Delta = OutputTileShape< Detail::RowArrangement::kDeltaColumn, Detail::RowArrangement::kDeltaRow, Detail::kCompactedDeltaGroup, Detail::kCompactedDeltaCluster, 1>; /// Number of elements within each vector access static int const kElementsPerAccess = ElementsPerAccess; /// Number of threads static int const kThreads = Threads; /// Function to compute each thread's initial offset CUTLASS_DEVICE static MatrixCoord initial_offset(int thread_idx) { int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0); int lane_idx = thread_idx % kWarpSize; // Compute warp location int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster; int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster; int group_idx = residual_cluster / Detail::WarpPartitions::kGroup; int residual_group = residual_cluster % 
Detail::WarpPartitions::kGroup; int row_idx = residual_group / Detail::WarpPartitions::kRow; int col_idx = residual_group % Detail::WarpPartitions::kRow; // Compute per-lane offset int lane_row_offset = lane_idx / Detail::kAccessWidth; int lane_col_offset = lane_idx % Detail::kAccessWidth; // Compute coordinate in output space int cluster_offset = cluster_idx * Shape::kRow * Shape::kGroup; int group_offset = group_idx * Shape::kRow; int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows; int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess; MatrixCoord coord( cluster_offset + group_offset + row_offset + lane_row_offset, column_offset + lane_col_offset * kElementsPerAccess ); return coord; } }; }; //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 3D interleaved layout across warps /// to achieve several performance objectives: /// /// - coalesced memory accesses in units of 64 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template <typename WarpCount_, typename Iterations_, int Threads, int ElementsPerAccess, int ElementSize> struct InterleavedOutputTileThreadMap { using WarpCount = WarpCount_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail {}; // // Output // using Iterations = Iterations_; using Delta = layout::PitchLinearShape<kWarpSize * kElementsPerAccess, 1>; /// Initial offset function CUTLASS_HOST_DEVICE static layout::PitchLinearCoord initial_offset(int thread_idx) { int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location layout::PitchLinearCoord warp_footprint{ Delta::kContiguous * Iterations::kContiguous, Delta::kStrided * Iterations::kStrided}; layout::PitchLinearCoord warp_offset{warp_idx % WarpCount::kContiguous, warp_idx / WarpCount::kContiguous}; // Compute per-lane offset layout::PitchLinearCoord thread_offset_in_warp{ lane_idx * kElementsPerAccess, 0}; layout::PitchLinearCoord thread_offset_in_threadblock_tile = warp_footprint * warp_offset + thread_offset_in_warp; return thread_offset_in_threadblock_tile; } }; //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 4D interleaved layout across warps /// to achieve several performance objectives: /// /// - coalesced memory accesses in units of 64 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template <typename WarpCount_, typename Iterations_, int Threads, int ElementsPerAccess, int ElementSize> struct InterleavedConvOutputTileThreadMap { using WarpCount = WarpCount_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail {}; // // Output // using Iterations = Iterations_; using Delta = MatrixShape<kWarpSize / 4, 4 * kElementsPerAccess>; /// Initial offset function CUTLASS_HOST_DEVICE static MatrixCoord initial_offset(int thread_idx) { int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location MatrixCoord warp_footprint{ Delta::kRow 
* Iterations::kRow, Delta::kColumn * Iterations::kColumn, }; MatrixCoord warp_offset{warp_idx % WarpCount::kRow, warp_idx / WarpCount::kRow}; // Compute per-lane offset MatrixCoord thread_offset_in_warp{lane_idx / 4, (lane_idx % 4) * kElementsPerAccess}; MatrixCoord thread_offset_in_threadblock_tile = warp_footprint * warp_offset + thread_offset_in_warp; return thread_offset_in_threadblock_tile; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass
19,750
C
30.500797
112
0.666127
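OutputTileThreadMapHelpers::iteration_index decomposes a linear iteration index column-fastest into (column, row, group, cluster, tile) coordinates by mixed-radix division. The sketch below mirrors that decomposition on the host; the Iterations extents are made-up values chosen only to make the unpacking visible, not a real thread-map configuration.

// Standalone sketch of OutputTileThreadMapHelpers::iteration_index().
#include <cstdio>

struct Iterations {            // hypothetical per-thread iteration extents
  static constexpr int kColumn  = 2;
  static constexpr int kRow     = 4;
  static constexpr int kGroup   = 2;
  static constexpr int kCluster = 1;
};

void iteration_index(int &column, int &row, int &group, int &cluster, int &tile,
                     int iter_idx) {
  column       = iter_idx % Iterations::kColumn;
  int residual = iter_idx / Iterations::kColumn;
  row          = residual % Iterations::kRow;
  residual     = residual / Iterations::kRow;
  group        = residual % Iterations::kGroup;
  residual     = residual / Iterations::kGroup;
  cluster      = residual % Iterations::kCluster;
  tile         = residual / Iterations::kCluster;
}

int main() {
  int total = Iterations::kColumn * Iterations::kRow
            * Iterations::kGroup * Iterations::kCluster;
  for (int i = 0; i < total; ++i) {
    int c, r, g, cl, t;
    iteration_index(c, r, g, cl, t, i);
    std::printf("iter %2d -> column=%d row=%d group=%d cluster=%d tile=%d\n",
                i, c, r, g, cl, t);
  }
  return 0;
}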
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. The shared memory resource is time-sliced across warps. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/epilogue_base_streamk.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large (!IsEpilogueFunctorHeavy<OutputOp_>::value) > class Epilogue : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>, public EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_> { public: using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; using BaseStreamK = EpilogueBaseStreamK< Shape_, PartitionsK, WarpMmaOperator_, AccumulatorFragmentIterator_>; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// Number of warps per block using WarpCount = typename Base::WarpCount; /// Number of threads per block static int const kBlockThreads = 32 * WarpCount::kCount; /// Per-thread accumulator tile type using AccumulatorTile = typename Base::AccumulatorTile; /// Numerical accumulation element type using ElementAccumulator = typename WarpMmaOperator::ElementC; /// Fragment type used by the accumulator tile's fragment iterator using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = 
OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Vector type used by the global output iterator using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Vector type used by the shared output iterator using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1, "One of these must be exactly 1."); private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Thread index in the threadblock int thread_idx; /// Warp index in the threadblock int warp_idx; public: /// Constructor CUTLASS_DEVICE Epilogue( typename Base::SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx) ///< Id of thread within warp : Base(shared_storage, thread_idx, warp_idx, lane_idx), BaseStreamK(thread_idx), shared_load_iterator_(shared_storage.reference(), thread_idx), thread_idx(thread_idx), warp_idx(warp_idx) {} /// Aggregates the accumulator sets shared by peer blocks in the global workspace, /// performing epilogue computations, writing to output CUTLASS_DEVICE void reduce( int peer_idx_begin, int peer_idx_end, int reduce_fragment_idx, void *element_workspace, OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination OutputTileIterator source_iterator) ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) { // Redcuce peer accumulator fragments into one fragment AccumulatorFragment accum_fragment; BaseStreamK::reduce(accum_fragment, peer_idx_begin, peer_idx_end, reduce_fragment_idx, element_workspace); // Store fragment to shared memory this->warp_tile_iterator_.store(accum_fragment); __syncthreads(); // Initialize/load source-fragment data typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); if (output_op.is_source_needed()) { source_iterator += reduce_fragment_idx; source_iterator.load(source_fragment); } // Load fragment from shared memory typename SharedLoadIterator::Fragment aligned_accum_fragment; shared_load_iterator_.load(aligned_accum_fragment); // Add fragments shared by other k partitions if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { typename SharedLoadIterator::Fragment aligned_addend_fragment; 
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_addend_fragment); aligned_accum_fragment = add_fragments(aligned_accum_fragment, aligned_addend_fragment); } } // Compute the output result typename OutputTileIterator::Fragment output_fragment; // Apply the output operator apply_output_operator(output_fragment, output_op, aligned_accum_fragment, source_fragment); // Store the final result destination_iterator += reduce_fragment_idx; destination_iterator.store(output_fragment); } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator ) ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) { if (!output_op.is_source_needed()) { source_iterator.clear_mask(); __syncthreads(); // Dummy (CUDA 11.0) } // Source-fragment data (zero-initialized for scenarios where the // output operator allows us to skip loading it from global input) typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); // Iterator over warp-level accumulator fragment AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) { // // Convert and store fragment // __syncthreads(); CUTLASS_PRAGMA_UNROLL for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; this->warp_tile_iterator_.store(accum_fragment); if (p < Base::kFragmentsPerIteration - 1) { this->warp_tile_iterator_.add_pointer_offset(kSmemPointerOffset); } } if (Base::kFragmentsPerIteration > 1) { this->warp_tile_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration)); } // // Load fragments from shared memory // __syncthreads(); CUTLASS_PRAGMA_UNROLL for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { // Load addend source fragment from global memory source_iterator.load(source_fragment); ++source_iterator; typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); if (p < Base::kFragmentsPerIteration - 1) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); } else if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Compute the output result // typename OutputTileIterator::Fragment output_fragment; apply_output_operator(output_fragment, output_op, aligned_accum_fragment[0], source_fragment); // // Store the final result // destination_iterator.store(output_fragment); ++destination_iterator; } if (Base::kFragmentsPerIteration > 1) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration)); } } } private: 
/// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment, typename OutputTileIterator::Fragment const &source_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); OutputAccessType const *source_frag_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]); } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
15,628
C
35.262181
128
0.677694
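When kPartitionsK > 1, the epilogue above loads partition 0's fragment from shared memory, then loads each remaining partition's fragment from an offset SMEM tile and adds it element-wise (plus<Fragment>) before applying the output operator. The following is a minimal sketch of that reduction pattern only; the fragment width, partition count, and stand-in load function are invented for illustration.

// Sketch of the k-partition fragment reduction in Epilogue::operator().
#include <array>
#include <cstdio>

constexpr int kPartitionsK      = 2;  // hypothetical split-K factor
constexpr int kFragmentElements = 8;  // hypothetical fragment width

using Fragment = std::array<float, kFragmentElements>;

// Stand-in for SharedLoadIterator::load() at a given partition's SMEM tile.
Fragment load_partition(int partition) {
  Fragment frag;
  for (int i = 0; i < kFragmentElements; ++i) {
    frag[i] = 0.5f * (partition + 1) + i;  // arbitrary test data
  }
  return frag;
}

int main() {
  Fragment accum = load_partition(0);
  for (int p = 1; p < kPartitionsK; ++p) {       // mirrors the kPartitionsK loop
    Fragment addend = load_partition(p);
    for (int i = 0; i < kFragmentElements; ++i) {
      accum[i] += addend[i];                     // plus<Fragment> equivalent
    }
  }
  for (float v : accum) std::printf("%g ", v);   // reduced fragment, pre-output-op
  std::printf("\n");
  return 0;
}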
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief PredicatedTileIteratorPredicates. PredicatedTileIteratorPredicates enables both upper and lower bounds for predicates. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator predicates used to bound computations in epilogue. 
/// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_ ///< Element data type > class PredicatedTileIteratorPredicates { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Count::kTile; static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0"); static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0"); static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0"); static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0"); /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; // // Parameters struct // /// Uses a non-template class struct Params : PredicatedTileIteratorParams { CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout): PredicatedTileIteratorParams( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, make_OutputTileThreadMapDesc<ThreadMap>() ) { } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kColumn; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
PredicatedTileIteratorParams params_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index lower_extent_row_; Index upper_extent_row_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// Internal state counter int state_[3]; // // Static asserts about internal strides // static_assert(sizeof(lower_extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(upper_extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIteratorPredicates( PredicatedTileIteratorParams const & params, TensorCoord lower_extent, TensorCoord upper_extent, int thread_idx, TensorCoord threadblock_offset = TensorCoord() ): params_(params) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; lower_extent_row_ = lower_extent.row(); upper_extent_row_ = upper_extent.row(); thread_start_row_ = thread_offset.row(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { mask_.predicates[c] = ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < upper_extent.column()) && ((thread_offset.column() + ThreadMap::Delta::kColumn * c) >= lower_extent.column()); } // Initialize internal state counter state_[0] = state_[1] = state_[2] = 0; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIteratorPredicates &operator++() { ++state_[0]; thread_start_row_ += ThreadMap::Shape::kRow; if (state_[0] == ThreadMap::Count::kRow) { state_[0] = 0; ++state_[1]; thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; if (state_[1] == ThreadMap::Count::kGroup) { state_[1] = 0; ++state_[2]; thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow; if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; } } } return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Gets the mask CUTLASS_DEVICE void get_mask(Mask &mask) { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } ///< Gets lower_extent_row_ CUTLASS_DEVICE Index get_lower_extent_row() { return lower_extent_row_; } ///< Gets upper_extent_row_ CUTLASS_DEVICE Index get_upper_extent_row() { return upper_extent_row_; } ///< Gets thread_start_row_ CUTLASS_DEVICE Index get_thread_start_row() { return thread_start_row_; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
9,146
C
28.506452
100
0.640171
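The constructor of PredicatedTileIteratorPredicates enables a column access only when its coordinate falls inside [lower_extent.column(), upper_extent.column()). The sketch below reproduces that per-iteration predicate test; the delta, iteration count, and extents are illustrative placeholders rather than values from a real ThreadMap.

// Sketch of the column-predicate initialization performed by
// PredicatedTileIteratorPredicates.
#include <cstdio>

constexpr int kIterationsColumn = 4;   // hypothetical ThreadMap::Iterations::kColumn
constexpr int kDeltaColumn      = 32;  // hypothetical ThreadMap::Delta::kColumn

int main() {
  int thread_column = 16;   // this thread's starting column
  int lower_extent  = 0;    // inclusive lower bound
  int upper_extent  = 96;   // exclusive upper bound

  bool predicates[kIterationsColumn];
  for (int c = 0; c < kIterationsColumn; ++c) {
    int column    = thread_column + kDeltaColumn * c;
    predicates[c] = (column < upper_extent) && (column >= lower_extent);
    std::printf("c=%d column=%d in-bounds=%d\n", c, column, int(predicates[c]));
  }
  return 0;
}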
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_direct_store.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Direct store epilogue */ #pragma once //////////////////////////////////////////////////////////////////////////////// #include "cutlass/epilogue/threadblock/epilogue_direct_store.h" #include "cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Given a properly constructed epilogue, returns a direct store epilogue template <typename EpilogueTensorOp> struct DefaultEpilogueDirectStore { using OutputTileIterator = DirectStoreEpilogueIterator<typename EpilogueTensorOp::OutputTileIterator::Element>; using Epilogue = EpilogueDirectStore< typename EpilogueTensorOp::Shape, typename EpilogueTensorOp::WarpMmaOperator, EpilogueTensorOp::kPartitionsK, OutputTileIterator, typename EpilogueTensorOp::AccumulatorFragmentIterator, typename EpilogueTensorOp::WarpTileIterator, typename EpilogueTensorOp::SharedLoadIterator, typename EpilogueTensorOp::OutputOp >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
3,234
C
42.133333
113
0.635436
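DefaultEpilogueDirectStore above does one thing: it rebinds the output tile iterator to a DirectStoreEpilogueIterator keyed on the same element type while reusing the rest of the source epilogue. A minimal sketch of that rebinding pattern, assuming hypothetical placeholder types rather than the actual CUTLASS classes:

// Simplified illustration of the rebinding pattern: keep the element type of the
// source epilogue's output iterator, swap the iterator itself.
// All type names here are hypothetical placeholders, not CUTLASS types.
#include <type_traits>

template <typename Element>
struct DirectStoreIteratorLike {      // stands in for DirectStoreEpilogueIterator
  using ElementType = Element;
};

template <typename SourceEpilogue>
struct RebindOutputIterator {
  // Rebind the output iterator while preserving its element type; a full trait
  // would forward the remaining nested types of SourceEpilogue unchanged.
  using OutputTileIterator =
      DirectStoreIteratorLike<typename SourceEpilogue::OutputTileIterator::Element>;
};

// Minimal source epilogue used to exercise the trait.
struct FakeIterator { using Element = float; };
struct FakeEpilogue { using OutputTileIterator = FakeIterator; };

static_assert(std::is_same<RebindOutputIterator<FakeEpilogue>::OutputTileIterator,
                           DirectStoreIteratorLike<float>>::value,
              "rebound iterator carries the source element type");

int main() { return 0; }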
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops on Volta. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/layout/permute.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueVoltaTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, ElementAccumulator >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8; static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B"); using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator, kSharedMemAlignment >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, 
AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueVoltaTensorOpStridedDgrad { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, ElementAccumulator >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad< OutputTileThreadMap, ElementOutput >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8; static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B"); using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator, kSharedMemAlignment >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
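/// (Affine rank-N variant: the definition below plugs a PredicatedTileIteratorAffineRankN, parameterized on Rank, into the same default epilogue recipe.)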
template < int Rank, typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueVoltaTensorOpAffineRankN { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, ElementAccumulator >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN< OutputTileThreadMap, ElementOutput, Rank >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp< typename WarpMmaTensorOp::Shape, gemm::GemmShape<32, 32, 4>, ElementAccumulator, LayoutC >; static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8; static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B"); using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator, kSharedMemAlignment >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
10,846
C
31.091716
123
0.705606
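The kSharedMemAlignment expression that the traits above static_assert to 8 bytes can be checked by hand, assuming 32-bit accumulators and a 2-element warp-tile access; both constants below are illustrative assumptions rather than values read out of the iterator types.

// Hand check of the kSharedMemAlignment arithmetic used in the traits above.
// The accumulator width (32 bits, e.g. float) and the 2-element access width
// are illustrative assumptions.
constexpr int kAccumulatorBits     = 32;
constexpr int kElementsPerAccess   = 2;
constexpr int kSharedMemAlignmentB = kAccumulatorBits * kElementsPerAccess / 8;

static_assert(kSharedMemAlignmentB == 8,
              "matches the 8B alignment the epilogue's static_assert expects");

int main() { return kSharedMemAlignmentB; }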
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_broadcast.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #include <cuda/std/utility> #else #include <assert.h> #include <utility> #endif #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/fast_math.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/numeric_types.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// This base class is meant to define the concept required of the /// EpilogueWithBroadcast::OutputOp template < typename ElementC_, typename ElementAccumulator_, typename ElementCompute_, typename ElementZ_, typename ElementT_, int ElementsPerAccess, bool StoreZ = true, bool StoreT = true > struct EpilogueWithBroadcastOpBase { using ElementOutput = ElementC_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; using ElementZ = ElementZ_; using ElementT = ElementT_; static int const kElementsPerAccess = ElementsPerAccess; using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>; using FragmentCompute = Array<ElementCompute, kElementsPerAccess>; using FragmentC = Array<ElementOutput, kElementsPerAccess>; using FragmentZ = Array<ElementZ, kElementsPerAccess>; using FragmentT = Array<ElementT, kElementsPerAccess>; /// If true, the 'Z' tensor is stored static bool const kStoreZ = StoreZ; /// If true, the 'T' tensor is stored static bool const kStoreT = StoreT; /// Parameters structure - required struct Params { }; // // Methods // /// Constructor from Params EpilogueWithBroadcastOpBase(Params const &params_) { } /// Determine if the source is needed. May return false if bool is_source_needed() const { return true; } CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { } /// Applies the operation when is_source_needed() is true CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentC const &frag_C1, FragmentC const &frag_C2, FragmentCompute const &V) const { } /// Applies the operation when is_source_needed() is false CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentCompute const &V) const { } }; //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator with bias vector broadcast over columns. 
/// /// Computes the following: /// /// /// Z, T = OutputOp(AB, C, Broadcast) /// /// if (ElementwiseOp::kStoreZ) { /// store(converted_u); /// } /// /// if (ElementwiseOp::kStoreT) { /// store(v); /// } /// template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors (z) typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands (t) typename ElementVector_, ///< Pointer to broadcast vector typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator - concept is EpilogueWithBroadcastOp typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large (!IsEpilogueFunctorHeavy<OutputOp_>::value), bool IsSingleSource = OutputOp_::kIsSingleSource > class EpilogueWithBroadcast; template < typename Shape_, typename WarpMmaOperator_, int PartitionsK, typename OutputTileIterator_, typename TensorTileIterator_, typename ElementVector_, typename AccumulatorFragmentIterator_, typename WarpTileIterator_, typename SharedLoadIterator_, typename OutputOp_, typename Padding_, int FragmentsPerPartition, int IterationsUnroll > class EpilogueWithBroadcast< Shape_, WarpMmaOperator_, PartitionsK, OutputTileIterator_, TensorTileIterator_, ElementVector_, AccumulatorFragmentIterator_, WarpTileIterator_, SharedLoadIterator_, OutputOp_, Padding_, FragmentsPerPartition, IterationsUnroll, false > : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition> { public: using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; static bool const kIsSingleSource = false; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using TensorTileIterator = TensorTileIterator_; using ElementVector = ElementVector_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename Base::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Compute data type produced by the output op using ElementCompute = typename OutputOp::ElementCompute; /// Compute fragment using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>; /// Thread map used by output tile iterators using ThreadMap = typename OutputTileIterator::ThreadMap; /// Fragment object used to store the broadcast values using BroadcastFragment = Array< ElementCompute, 
ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Data type of additional tensor using ElementTensor = typename TensorTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>; /// Tensor access type using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = typename Base::WarpCount; /// Shared memory allocation from epilogue base class using BaseSharedStorage = typename Base::SharedStorage; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles; /// Used for the broadcast struct BroadcastDetail { /// Number of threads per warp static int const kWarpSize = 32; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; /// Number of distinct scalar column indices handled by each thread static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess; /// Number of distinct scalar row indices handled by each thread static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn; /// Number of threads per threadblock static int const kThreadCount = kWarpSize * WarpCount::kCount; /// Number of distinct threads per row of output tile static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread); /// Number of distinct threads which must be reduced during the final reduction phase within the threadblock. static int const kThreadRows = kThreadCount / kThreadsPerRow; /// I'm not sure what I meant here. 
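/// (From the expression below, this appears to be the number of accesses each thread needs to cover one Shape::kN-wide row: max(1, ceil(Shape::kN / kThreadCount)).)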
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount); /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape< kThreadRows, Shape::kN >; /// Debug printing CUTLASS_DEVICE static void print() { #if 0 printf("BroadcastDetail {\n"); printf( " kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n" "kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n", kColumnsPerThread, kRowsPerThread, kThreadCount, kThreadsPerRow, kThreadRows, kThreadAccessesPerRow, StorageShape::kRow, StorageShape::kColumn, StorageShape::kCount ); printf("};\n"); #endif } }; /// Shared storage structure (shadows base) with additional SMEM buffer for reduction struct SharedStorage { union { BaseSharedStorage base; }; CUTLASS_HOST_DEVICE SharedStorage() { } }; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Thread index within the threadblock int thread_idx_; public: /// Constructor CUTLASS_DEVICE EpilogueWithBroadcast( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): Base(shared_storage.base, thread_idx, warp_idx, lane_idx), shared_load_iterator_(shared_storage.base.reference(), thread_idx), thread_idx_(thread_idx) { } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator ElementVector const * broadcast_ptr, ///< Broadcast vector OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord(Shape::kM, Shape::kN), MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space MatrixCoord()) { BroadcastFragment broadcast_fragment; load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset); if (!output_op.is_source_needed()) { compute_source_not_needed_( output_op, broadcast_fragment, destination_iterator, accumulators, tensor_iterator); } else { compute_source_needed_( output_op, broadcast_fragment, destination_iterator, accumulators, source_iterator1, source_iterator2, tensor_iterator); } } private: CUTLASS_DEVICE void load_broadcast_fragment_( BroadcastFragment & broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns ElementVector const * broadcast_ptr, ///< Broadcast vector MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord const &threadblock_offset 
///< Threadblock's initial offset within the problem size space ) { broadcast_fragment.clear(); // If no pointer is supplied, set with all zeros and avoid memory accesses if (!broadcast_ptr) { return; } int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column(); int thread_column_idx = threadblock_offset.column() + thread_initial_column; broadcast_ptr += thread_initial_column; NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter; using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>; using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>; ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment); CUTLASS_PRAGMA_UNROLL for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) { AccessType loaded; loaded.clear(); if (thread_column_idx < problem_size.column()) { loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr); } ComputeFragmentType cvt = converter(loaded); frag_ptr[j] = cvt; thread_column_idx += ThreadMap::Delta::kColumn; broadcast_ptr += ThreadMap::Delta::kColumn; } } template <class Seq> struct acc2smem_source_not_needed; template <size_t... Seq> struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> { template <int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } CUTLASS_PRAGMA_UNROLL for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; warp_tile_iterator.store(accum_fragment); if (p < Base::kFragmentsPerIteration - 1) { warp_tile_iterator.add_pointer_offset(kSmemPointerOffset); } } if (Base::kFragmentsPerIteration > 1) { warp_tile_iterator.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration)); } } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = { (pos == (Seq * Base::kFragmentsPerIteration)) && (helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...}; CUTLASS_UNUSED(dummy[0]); } }; /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp const &output_op, ///< Output operator BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additioanl tensor operand ) { // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // // CUTLASS_PRAGMA_UNROLL #pragma unroll(IterationsUnroll ? 
OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) { // // Convert and store fragment // __syncthreads(); acc2smem_source_not_needed< cutlass::make_index_sequence<OutputTileIterator::kIterations / Base::kFragmentsPerIteration>>::push(iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // CUTLASS_PRAGMA_UNROLL for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); if (p < Base::kFragmentsPerIteration - 1) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); } else if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Apply output operation // typename OutputTileIterator::Fragment frag_Z; typename TensorTileIterator::Fragment frag_T; apply_output_operator_source_not_needed_( frag_Z, frag_T, output_op, aligned_accum_fragment[0], broadcast_fragment); // // Conditionally store fragments // if (OutputOp::kStoreZ) { destination_iterator.store(frag_Z); ++destination_iterator; } if (OutputOp::kStoreT) { tensor_iterator.store(frag_T); ++tensor_iterator; } } if (Base::kFragmentsPerIteration > 1) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration)); } } } template<class Seq> struct acc2smem_source_needed; template <size_t... 
Seq> struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator1, ///< Tile iterator for first source accumulator matrix OutputTileIterator source_iterator2, ///< Tile iterator for second source accumulator matrix TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additioanl tensor operand ) { typename OutputTileIterator::Fragment source_fragment1; source_fragment1.clear(); typename OutputTileIterator::Fragment source_fragment2; source_fragment2.clear(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // source_iterator1.load(source_fragment1); ++source_iterator1; source_iterator2.load(source_fragment2); ++source_iterator2; // // Convert and store fragment // __syncthreads(); acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push( iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_tile_offset({tile_row_offset , 0}); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0}); } // // Apply output operation // typename OutputTileIterator::Fragment frag_Z; typename TensorTileIterator::Fragment frag_T; apply_output_operator_( frag_Z, frag_T, output_op, aligned_accum_fragment[0], source_fragment1, source_fragment2, broadcast_fragment); // // Conditionally store fragments // if (OutputOp::kStoreZ) { destination_iterator.store(frag_Z); ++destination_iterator; } if (OutputOp::kStoreT) { tensor_iterator.store(frag_T); ++tensor_iterator; } } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void 
apply_output_operator_( typename OutputTileIterator::Fragment &frag_Z, typename TensorTileIterator::Fragment &frag_T, OutputOp const &output_op, typename SharedLoadIterator::Fragment const &frag_AB, typename OutputTileIterator::Fragment const &frag_C1, typename OutputTileIterator::Fragment const &frag_C2, BroadcastFragment const &frag_Broadcast) { using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>; using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>; using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>; AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z); AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T); AccumulatorAccessType const *frag_AB_ptr = reinterpret_cast<AccumulatorAccessType const *>(&frag_AB); OutputAccessType const *frag_C1_ptr = reinterpret_cast<OutputAccessType const *>(&frag_C1); OutputAccessType const *frag_C2_ptr = reinterpret_cast<OutputAccessType const *>(&frag_C2); AccessTypeBroadcast const *frag_Broadcast_ptr = reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { output_op( frag_Z_ptr[i], frag_T_ptr[i], frag_AB_ptr[i], frag_C1_ptr[i], frag_C2_ptr[i], frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]); } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_source_not_needed_( typename OutputTileIterator::Fragment &frag_Z, typename TensorTileIterator::Fragment &frag_T, OutputOp const &output_op, typename SharedLoadIterator::Fragment const &frag_AB, BroadcastFragment const &frag_Broadcast) { using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>; using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>; using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>; AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z); AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T); AccumulatorAccessType const *frag_AB_ptr = reinterpret_cast<AccumulatorAccessType const *>(&frag_AB); AccessTypeBroadcast const *frag_Broadcast_ptr = reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { output_op( frag_Z_ptr[i], frag_T_ptr[i], frag_AB_ptr[i], frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]); } } }; template < typename Shape_, typename WarpMmaOperator_, int PartitionsK, typename OutputTileIterator_, typename TensorTileIterator_, typename ElementVector_, typename AccumulatorFragmentIterator_, typename WarpTileIterator_, typename SharedLoadIterator_, typename OutputOp_, typename Padding_, int FragmentsPerPartition, int IterationsUnroll > class EpilogueWithBroadcast< Shape_, WarpMmaOperator_, PartitionsK, OutputTileIterator_, TensorTileIterator_, ElementVector_, AccumulatorFragmentIterator_, WarpTileIterator_, SharedLoadIterator_, OutputOp_, Padding_, FragmentsPerPartition, IterationsUnroll, true > : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition> { public: using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, 
PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; static bool const kIsSingleSource = true; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using TensorTileIterator = TensorTileIterator_; using ElementVector = ElementVector_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename Base::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Compute data type produced by the output op using ElementCompute = typename OutputOp::ElementCompute; /// Compute fragment using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>; /// Thread map used by output tile iterators using ThreadMap = typename OutputTileIterator::ThreadMap; /// Fragment object used to store the broadcast values using BroadcastFragment = Array< ElementCompute, ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Data type of additional tensor using ElementTensor = typename TensorTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>; /// Tensor access type using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = typename Base::WarpCount; /// Shared memory allocation from epilogue base class using BaseSharedStorage = typename Base::SharedStorage; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? 
Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles; /// Used for the broadcast struct BroadcastDetail { /// Number of threads per warp static int const kWarpSize = 32; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; /// Number of distinct scalar column indices handled by each thread static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess; /// Number of distinct scalar row indices handled by each thread static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn; /// Number of threads per threadblock static int const kThreadCount = kWarpSize * WarpCount::kCount; /// Number of distinct threads per row of output tile static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread); /// Number of distinct threads which must be reduced during the final reduction phase within the threadblock. static int const kThreadRows = kThreadCount / kThreadsPerRow; /// I'm not sure what I meant here. static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount); /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape< kThreadRows, Shape::kN >; /// Debug printing CUTLASS_DEVICE static void print() { #if 0 printf("BroadcastDetail {\n"); printf( " kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n" "kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n", kColumnsPerThread, kRowsPerThread, kThreadCount, kThreadsPerRow, kThreadRows, kThreadAccessesPerRow, StorageShape::kRow, StorageShape::kColumn, StorageShape::kCount ); printf("};\n"); #endif } }; /// Shared storage structure (shadows base) with additional SMEM buffer for reduction struct SharedStorage { union { BaseSharedStorage base; }; CUTLASS_HOST_DEVICE SharedStorage() { } }; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Thread index within the threadblock int thread_idx_; public: /// Constructor CUTLASS_DEVICE EpilogueWithBroadcast( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): Base(shared_storage.base, thread_idx, warp_idx, lane_idx), shared_load_iterator_(shared_storage.base.reference(), thread_idx), thread_idx_(thread_idx) { } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator ElementVector const * broadcast_ptr, ///< Broadcast vector OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand MatrixCoord const &problem_size = ///< Problem size needed to guard 
against out-of-bounds accesses MatrixCoord(Shape::kM, Shape::kN), MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space MatrixCoord()) { BroadcastFragment broadcast_fragment; load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset); if (!output_op.is_source_needed()) { compute_source_not_needed_( output_op, broadcast_fragment, destination_iterator, accumulators, tensor_iterator); } else { compute_source_needed_( output_op, broadcast_fragment, destination_iterator, accumulators, source_iterator, tensor_iterator); } } private: CUTLASS_DEVICE void load_broadcast_fragment_( BroadcastFragment & broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns ElementVector const * broadcast_ptr, ///< Broadcast vector MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space ) { broadcast_fragment.clear(); // If no pointer is supplied, set with all zeros and avoid memory accesses if (!broadcast_ptr) { return; } int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column(); int thread_column_idx = threadblock_offset.column() + thread_initial_column; broadcast_ptr += thread_initial_column; NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter; using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>; using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>; ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment); CUTLASS_PRAGMA_UNROLL for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) { AccessType loaded; loaded.clear(); if (thread_column_idx < problem_size.column()) { loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr); } ComputeFragmentType cvt = converter(loaded); frag_ptr[j] = cvt; thread_column_idx += ThreadMap::Delta::kColumn; broadcast_ptr += ThreadMap::Delta::kColumn; } } template <class Seq> struct acc2smem_source_not_needed; template <size_t... 
Seq> struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> { template <int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } CUTLASS_PRAGMA_UNROLL for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; warp_tile_iterator.store(accum_fragment); if (p < Base::kFragmentsPerIteration - 1) { warp_tile_iterator.add_pointer_offset(kSmemPointerOffset); } } if (Base::kFragmentsPerIteration > 1) { warp_tile_iterator.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration)); } } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = { (pos == (Seq * Base::kFragmentsPerIteration)) && (helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...}; CUTLASS_UNUSED(dummy[0]); } }; /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp const &output_op, ///< Output operator BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand ) { // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // // CUTLASS_PRAGMA_UNROLL #pragma unroll(IterationsUnroll ?
OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) { // // Convert and store fragment // __syncthreads(); acc2smem_source_not_needed< cutlass::make_index_sequence<OutputTileIterator::kIterations / Base::kFragmentsPerIteration>>::push(iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // CUTLASS_PRAGMA_UNROLL for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); if (p < Base::kFragmentsPerIteration - 1) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); } else if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Apply output operation // typename OutputTileIterator::Fragment frag_Z; typename TensorTileIterator::Fragment frag_T; apply_output_operator_source_not_needed_( frag_Z, frag_T, output_op, aligned_accum_fragment[0], broadcast_fragment); // // Conditionally store fragments // if (OutputOp::kStoreZ) { destination_iterator.store(frag_Z); ++destination_iterator; } if (OutputOp::kStoreT) { tensor_iterator.store(frag_T); ++tensor_iterator; } } if (Base::kFragmentsPerIteration > 1) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration)); } } } template<class Seq> struct acc2smem_source_needed; template <size_t... Seq> struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additioanl tensor operand ) { typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? 
OutputTileIterator::kIterations : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // source_iterator.load(source_fragment); ++source_iterator; // // Convert and store fragment // __syncthreads(); acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push( iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_tile_offset({tile_row_offset , 0}); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0}); } // // Apply output operation // typename OutputTileIterator::Fragment frag_Z; typename TensorTileIterator::Fragment frag_T; apply_output_operator_( frag_Z, frag_T, output_op, aligned_accum_fragment[0], source_fragment, broadcast_fragment); // // Conditionally store fragments // if (OutputOp::kStoreZ) { destination_iterator.store(frag_Z); ++destination_iterator; } if (OutputOp::kStoreT) { tensor_iterator.store(frag_T); ++tensor_iterator; } } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_( typename OutputTileIterator::Fragment &frag_Z, typename TensorTileIterator::Fragment &frag_T, OutputOp const &output_op, typename SharedLoadIterator::Fragment const &frag_AB, typename OutputTileIterator::Fragment const &frag_C, BroadcastFragment const &frag_Broadcast) { using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>; using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>; using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>; AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z); AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T); AccumulatorAccessType const *frag_AB_ptr = reinterpret_cast<AccumulatorAccessType const *>(&frag_AB); OutputAccessType const *frag_C_ptr = reinterpret_cast<OutputAccessType const *>(&frag_C); AccessTypeBroadcast const *frag_Broadcast_ptr = reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { output_op( frag_Z_ptr[i], frag_T_ptr[i], frag_AB_ptr[i], frag_C_ptr[i], frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]); } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_source_not_needed_( typename OutputTileIterator::Fragment &frag_Z, typename TensorTileIterator::Fragment &frag_T, OutputOp const &output_op, typename SharedLoadIterator::Fragment const &frag_AB, BroadcastFragment const &frag_Broadcast) { using AccessTypeZ = Array<typename OutputTileIterator::Element, kElementsPerAccess>; using AccessTypeT = Array<typename TensorTileIterator::Element, kElementsPerAccess>; using 
AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>; AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z); AccessTypeT *frag_T_ptr = reinterpret_cast<AccessTypeT *>(&frag_T); AccumulatorAccessType const *frag_AB_ptr = reinterpret_cast<AccumulatorAccessType const *>(&frag_AB); AccessTypeBroadcast const *frag_Broadcast_ptr = reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { output_op( frag_Z_ptr[i], frag_T_ptr[i], frag_AB_ptr[i], frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]); } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
52,430
C
33.02401
128
0.666183
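A scalar reference sketch of the Z, T = OutputOp(AB, C, Broadcast) semantics documented in epilogue_with_broadcast.h above, including the per-column broadcast indexing and the kStoreZ / kStoreT guards. The concrete output op used here (alpha * AB + beta * C + bias, with T mirroring Z) is an illustrative assumption, not the functor the library defines.

// Scalar reference model of Z, T = OutputOp(AB, C, Broadcast) with a per-column
// broadcast vector, mirroring the storage guards kStoreZ / kStoreT described above.
// The concrete op is an illustrative assumption.
#include <cstdio>
#include <vector>

int main() {
  const int M = 2, N = 4;
  const float alpha = 1.0f, beta = 0.5f;
  const bool kStoreZ = true, kStoreT = true;

  std::vector<float> AB(M * N, 2.0f);   // accumulator tile
  std::vector<float> C(M * N, 1.0f);    // source tensor
  std::vector<float> broadcast(N);      // one bias value per output column
  for (int j = 0; j < N; ++j) broadcast[j] = 0.1f * j;

  std::vector<float> Z(M * N, 0.0f), T(M * N, 0.0f);

  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      float z = alpha * AB[i * N + j] + beta * C[i * N + j] + broadcast[j];
      float t = z;                      // second output simply mirrors Z here
      if (kStoreZ) Z[i * N + j] = z;
      if (kStoreT) T[i * N + j] = t;
    }
  }

  std::printf("Z[0][3] = %f\n", Z[3]);  // 2.0 + 0.5 + 0.3 (approximately 2.8)
  return 0;
}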
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_visitor.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Generic epilogue for implementing certain kinds of fused epilogue behavior. 
*/ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/matrix_coord.h" #include "cutlass/semaphore.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////////////////////////// class EpilogueFusedVisitorConcept { public: static int const kIterations = 1; static int const kElementsPerAccess = 4; using ElementOutput = float; using ElementAccumulator = float; using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>; /// Arguments structure struct Arguments { }; /// Params structure struct Params { Params() { } Params(Arguments const &args) { } }; /// Shared storage struct SharedStorage { }; public: CUTLASS_DEVICE EpilogueFusedVisitorConcept( Params const &params, ///< Parameters routed to the epilogue SharedStorage &shared_storage, ///< Shared storage needed by the functors here MatrixCoord const &problem_size, ///< Problem size of the output int thread_idx, ///< Thread index within the threadblock int warp_idx, ///< Warp index within the threadblock int lane_idx, ///< Lane index within the warp MatrixCoord const &threadblock_offset = MatrixCoord(0, 0)) { ///< Coordinate } /// Helper to indicate split-K behavior CUTLASS_DEVICE void set_k_partition( int split_k_index, ///< Index of this threadblock within split-K partitioned scheme int split_k_slices) { ///< Total number of split-K slices } /// Called to set the batch index CUTLASS_DEVICE void set_batch_index(int batch_idx) { } /// Called at the start of the epilogue just before iterating over accumulator slices CUTLASS_DEVICE void begin_epilogue() { } /// Called at the start of one step before starting accumulator exchange CUTLASS_DEVICE void begin_step(int step_idx) { } /// Called at the start of a row CUTLASS_DEVICE void begin_row(int row_idx) { } /// Called after accumulators have been exchanged for each accumulator vector CUTLASS_DEVICE void visit( int iter_idx, int row_idx, int column_idx, int frag_idx, AccumulatorFragment const &accum) { } /// Called at the end of a row CUTLASS_DEVICE void end_row(int row_idx) { } /// Called after all accumulator elements have been visited CUTLASS_DEVICE void end_step(int step_idx) { } /// Called after all steps have been completed CUTLASS_DEVICE void end_epilogue() { } }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename Visitor_, ///< Functor containing fused operations (satisfies EpilogueFusedVisitorConcept) typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size 
when epilogue op is large (true || !IsEpilogueFunctorHeavy<Visitor_>::value) > class EpilogueWithVisitor : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition> { public: using Visitor = Visitor_; using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename Base::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output access size static int const kElementsPerAccess = Visitor::kElementsPerAccess; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Array type used by output functor using AccumulatorAccessType = Array< typename WarpTileIterator::Element, kElementsPerAccess>; /// Number of warps using WarpCount = typename Base::WarpCount; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles; using SharedStorage = typename Base::SharedStorage; private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; public: /// Constructor CUTLASS_DEVICE EpilogueWithVisitor( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): Base(shared_storage, thread_idx, warp_idx, lane_idx), shared_load_iterator_(shared_storage.reference(), thread_idx) { } /// Streams the result to global memory CUTLASS_DEVICE void operator()( Visitor & visitor, AccumulatorTile const &accumulators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) visitor.begin_epilogue(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? 
Visitor::kIterations : 1) for (int iter_idx = 0; iter_idx < Visitor::kIterations; ++iter_idx) { // // Load the source // visitor.begin_step(iter_idx); // // Convert and store fragment // __syncthreads(); acc2smem_source_needed<cutlass::make_index_sequence<Visitor::kIterations>>::push( iter_idx, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Iterate over output fragments // AccumulatorAccessType const *accum_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment[0]); int const kAccumulatorFragmentCount = AccumulatorTile::kElements / (Visitor::kIterations * AccumulatorAccessType::kElements); CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) { int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn; int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn; // Start a new row of the output fragment if (!col_idx) { visitor.begin_row(row_idx); } visitor.visit( iter_idx, row_idx, col_idx, idx, accum_frag_ptr[idx] ); // End the row of the output fragment if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) { visitor.end_row(row_idx); } } // // Conclude the step // visitor.end_step(iter_idx); } visitor.end_epilogue(); } private: template<class Seq> struct acc2smem_source_needed; template <size_t... 
Seq> struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to create an EpilogueWithVisitor from an existing epilogue template <typename Visitor_, typename Existing_, bool IterationsUnroll = true> struct EpilogueWithVisitorFromExistingEpilogue { using Epilogue = EpilogueWithVisitor< Visitor_, typename Existing_::Shape, typename Existing_::WarpMmaOperator, Existing_::kPartitionsK, typename Existing_::AccumulatorFragmentIterator, typename Existing_::WarpTileIterator, typename Existing_::SharedLoadIterator, typename Existing_::Padding, Existing_::kFragmentsPerIteration, IterationsUnroll >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
13,454
C
31.817073
131
0.62539
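The epilogue_with_visitor.h record above defines a visitor concept whose callbacks are driven by EpilogueWithVisitor::operator(): begin_epilogue once, then per accumulator step begin_step, begin_row at the start of each output row, visit per accumulator vector, end_row, end_step, and finally end_epilogue. Below is a compilable host-side trace of that call ordering and of the row/column index derivation; the iteration counts are made-up placeholders, and the logging visitor is not part of CUTLASS.

// Trace of the visitor callback sequence used by the epilogue loop above.
#include <cstdio>

struct LoggingVisitor {
  void begin_epilogue()  { std::printf("begin_epilogue\n"); }
  void begin_step(int s) { std::printf(" begin_step %d\n", s); }
  void begin_row(int r)  { std::printf("  begin_row %d\n", r); }
  void visit(int it, int r, int c, int idx, float accum) {
    std::printf("   visit iter=%d row=%d col=%d idx=%d accum=%f\n", it, r, c, idx, accum);
  }
  void end_row(int r)    { std::printf("  end_row %d\n", r); }
  void end_step(int s)   { std::printf(" end_step %d\n", s); }
  void end_epilogue()    { std::printf("end_epilogue\n"); }
};

int main() {
  constexpr int kIterations       = 2;  // Visitor::kIterations (assumed)
  constexpr int kColumnIterations = 2;  // SharedLoadIterator::ThreadMap::Iterations::kColumn (assumed)
  constexpr int kFragmentCount    = 4;  // accumulator vectors per step (assumed)

  LoggingVisitor visitor;
  visitor.begin_epilogue();
  for (int iter = 0; iter < kIterations; ++iter) {
    visitor.begin_step(iter);
    for (int idx = 0; idx < kFragmentCount; ++idx) {
      int row = idx / kColumnIterations;            // same derivation as the epilogue loop
      int col = idx % kColumnIterations;
      if (col == 0)                     visitor.begin_row(row);
      visitor.visit(iter, row, col, idx, /*accum=*/1.0f);
      if (col + 1 == kColumnIterations) visitor.end_row(row);
    }
    visitor.end_step(iter);
  }
  visitor.end_epilogue();
  return 0;
}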
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_depthwise.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for Depthwise convoltuion The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/gemm/gemm.h" #include "cutlass/numeric_types.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template <typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename ThreadOutputShape_, /// Size of the matrix to load (concept: TensorNHWC) typename ThreadBlockOutputShape_, /// Size of the matrix to load (concept: TensorNHWC) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: ///< gemm::warp::MmaTensorOp) typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator typename Padding_ ///< Padding added to SMEM allocation to avoid bank conflicts (concept: ///< MatrixShape) > class EpilogueDepthwise { public: using Shape = Shape_; using WarpShape = typename WarpMmaOperator_::Shape; using ThreadOutputShape = ThreadOutputShape_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; using WarpMmaOperator = WarpMmaOperator_; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array<typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = gemm::GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN>; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); /// Shared storage 
allocation needed by the epilogue struct SharedStorage { // // Type definitions // /// Element type of shared memory using Element = typename WarpTileIterator::Element; /// Tensor reference to shared memory allocation using TensorRef = typename WarpTileIterator::TensorRef; /// Layout of shared memory allocation using Layout = typename WarpTileIterator::Layout; /// Logical shape of the shared memory tile written to by all warps. using Shape = MatrixShape<ThreadBlockOutputShape::kNHW, ThreadBlockOutputShape::kC>; /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape<Shape::kRow, Shape::kColumn>; // // Data members // AlignedBuffer<Element, StorageShape::kCount> storage; // // Methods // /// Returns a pointer to the shared memory buffer CUTLASS_DEVICE Element *data() { return storage.data(); } /// Returns a tensor reference to the shared memory buffer CUTLASS_DEVICE TensorRef reference() { return TensorRef(storage.data(), Layout::packed({StorageShape::kRow, StorageShape::kColumn})); } }; private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Stores a warp's fragment of accumulators to SMEM WarpTileIterator warp_tile_iterator_; LongIndex warp_offset; int thread_idx; int warp_idx; int lane_idx; int warp_m, warp_n; // warp coordinates within a cta int tid_m, tid_n; // thread coordinates within a warp public: /// Constructor CUTLASS_DEVICE EpilogueDepthwise(SharedStorage &shared_storage, ///< Shared storage object int thread_idx_, ///< ID of a thread within the threadblock int warp_idx_, ///< ID of warp within threadblock int lane_idx_ ///< Id of thread within warp ) : thread_idx(thread_idx_), warp_idx(warp_idx_), lane_idx(lane_idx_), shared_load_iterator_(shared_storage.reference(), thread_idx_), warp_tile_iterator_(shared_storage.reference(), thread_idx_, lane_idx_) {} /// Streams the result to global memory CUTLASS_DEVICE void operator()(OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator, ///< Threadblock tile coordinate in GEMM (in ///< units of threadblock tiles) const int smem_base_offset) { ///< SMEM base offset for epilogue operation // initiate the smem base offset for different output tile. 
warp_tile_iterator_.set_smem_base_address(smem_base_offset); shared_load_iterator_.set_smem_base_address(smem_base_offset); if (!output_op.is_source_needed()) { compute_source_not_needed_(output_op, destination_iterator, accumulators); } else { compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator); } } private: /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); source_iterator.load(source_fragment); // store to smem warp_tile_iterator_.store(accumulators); __syncthreads(); typename SharedLoadIterator::Fragment aligned_accum_fragment; // load from smem shared_load_iterator_.load(aligned_accum_fragment); typename OutputTileIterator::Fragment output_fragment; apply_output_operator_(output_fragment, output_op, aligned_accum_fragment, source_fragment); // Store to GMEM destination_iterator.store(output_fragment); } /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) // store to smem warp_tile_iterator_.store(accumulators); __syncthreads(); typename SharedLoadIterator::Fragment aligned_accum_fragment; // load from smem shared_load_iterator_.load(aligned_accum_fragment); typename OutputTileIterator::Fragment output_fragment; apply_output_operator_source_not_needed_(output_fragment, output_op, aligned_accum_fragment); // Store to GMEM destination_iterator.store(output_fragment); } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment, typename OutputTileIterator::Fragment const &source_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); OutputAccessType const *source_frag_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i], source_frag_ptr[i]); } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_source_not_needed_( typename OutputTileIterator::Fragment &output_fragment, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment) { OutputAccessType *output_frag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment); AccumulatorAccessType const *compute_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); int const kOutputOpIterations = 
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator output_frag_ptr[i] = output_op(compute_frag_ptr[i]); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
13,424
C
38.955357
120
0.674836
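EpilogueDepthwise above dispatches on output_op.is_source_needed(): when the source is needed it loads the source fragment and applies the binary form of the output operator, otherwise it applies the unary form. The sketch below mirrors that dispatch on the host with a simple linear-combination stand-in (D = alpha * accum + beta * source); the functor, alpha/beta values, and fragment length are assumptions for illustration only.

// Host sketch of the source-needed / source-not-needed dispatch.
#include <array>
#include <cstdio>

constexpr int kFragmentElements = 8;               // assumed fragment size
using Fragment = std::array<float, kFragmentElements>;

struct LinearCombination {                         // stand-in for OutputOp
  float alpha = 2.0f, beta = 1.0f;
  bool  is_source_needed() const { return beta != 0.0f; }
  float operator()(float accum, float source) const { return alpha * accum + beta * source; }
  float operator()(float accum) const               { return alpha * accum; }
};

Fragment run_epilogue(const LinearCombination &op, const Fragment &accum, const Fragment &source) {
  Fragment out{};
  if (op.is_source_needed()) {
    for (int i = 0; i < kFragmentElements; ++i) out[i] = op(accum[i], source[i]);  // compute_source_needed_ path
  } else {
    for (int i = 0; i < kFragmentElements; ++i) out[i] = op(accum[i]);             // compute_source_not_needed_ path
  }
  return out;
}

int main() {
  Fragment accum;  accum.fill(3.0f);
  Fragment source; source.fill(1.0f);
  Fragment out = run_epilogue(LinearCombination{}, accum, source);
  std::printf("out[0] = %f\n", out[0]);            // 2*3 + 1*1 = 7
  return 0;
}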
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_ ///< Element data type > class PredicatedTileIteratorStridedDgrad { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Count::kTile; static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0"); static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0"); static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0"); static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0"); /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; // // Parameters struct // /// Uses a non-template class struct Params : PredicatedTileIteratorParams { /// Convolution problem size cutlass::conv::Conv2dProblemSize problem_size; int tiled_rows_per_filter; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout, cutlass::conv::Conv2dProblemSize problem_size_, int threadblock_row): problem_size(problem_size_), PredicatedTileIteratorParams( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, make_OutputTileThreadMapDesc<ThreadMap>() ) { int tile_m_per_filter = strided_dgrad_tile_m_per_filter(problem_size, threadblock_row); tiled_rows_per_filter = tile_m_per_filter * threadblock_row; } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kColumn; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
Params params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// Starting Dx h and w dimenstion for strided dgrad mapping int start_h_, start_w_; /// Effective Dy P and Q dimenstions for strided dgrad mapping int p_, q_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// A thread's starting column position (assuming steady-state predicates have been computed) Index thread_start_column_; /// Internal state counter int state_[3]; // // Static asserts about internal strides // static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIteratorStridedDgrad( Params const & params, Element *pointer, TensorCoord extent, int thread_idx, FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod, int start_r, int start_s, TensorCoord threadblock_offset = TensorCoord() ): params_(params) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; int r = start_r; int s = start_s; if (params_.problem_size.mode == cutlass::conv::Mode::kConvolution) { r = (params_.problem_size.R - 1 - r); s = (params_.problem_size.S - 1 - s); } // compute starting coordinates in Dx start_h_ and start_w_ strided_dgrad_starting_coords( params_.problem_size, stride_h_divmod, stride_w_divmod, r, s, start_h_, start_w_); p_ = (params_.problem_size.H - start_h_ + params_.problem_size.stride_h - 1) / params_.problem_size.stride_h; q_ = (params_.problem_size.W - start_w_ + params_.problem_size.stride_w - 1) / params_.problem_size.stride_w; extent_row_ = extent.row(); thread_start_row_ = thread_offset.row(); thread_start_column_ = thread_offset.column(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { mask_.predicates[c] = ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column()); } // Null pointer performs no accesses if (!pointer) { mask_.clear(); } // Initialize pointer byte_pointer_ = reinterpret_cast<uint8_t *>(pointer); // Initialize internal state counter state_[0] = state_[1] = state_[2] = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; // remapping rows to find the mapped_row_offset int npq_offset = (row_offset + thread_start_row_) % params_.tiled_rows_per_filter; // (STEP 4.a) [order NHW rows to be 
loaded and stored in output Dx NHWxC layout] int n = npq_offset / (p_ * q_); int residual = npq_offset % (p_ * q_); int p = residual / q_; int q = residual % q_; int mapped_row_offset = n * (params_.problem_size.H * params_.problem_size.W) + (start_h_ + p * params_.problem_size.stride_h) * params_.problem_size.W + (start_w_ + q * params_.problem_size.stride_w); bool row_guard = mapped_row_offset < extent_row_; int64_t row_byte_offset = mapped_row_offset * params_.stride; CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int64_t column_byte_offset = (thread_start_column_ + column * ThreadMap::Delta::kColumn) * (sizeof_bits<Element>::value / 8); bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)(byte_pointer + row_byte_offset + column_byte_offset + byte_offset), guard); } } } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; // remapping rows to find the mapped_row_offset int npq_offset = (row_offset + thread_start_row_) % params_.tiled_rows_per_filter; // (STEP 4.a) [order NHW rows to be loaded and stored in output Dx NHWxC layout] int n = npq_offset / (p_ * q_); int residual = npq_offset % (p_ * q_); int p = residual / q_; int q = residual % q_; int mapped_row_offset = n * (params_.problem_size.H * params_.problem_size.W) + (start_h_ + p * params_.problem_size.stride_h) * params_.problem_size.W + (start_w_ + q * params_.problem_size.stride_w); bool row_guard = mapped_row_offset < extent_row_; int64_t row_byte_offset = mapped_row_offset * params_.stride; CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int64_t column_byte_offset = (thread_start_column_ + column * ThreadMap::Delta::kColumn) * (sizeof_bits<Element>::value / 8); bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_store<AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)(byte_pointer + row_byte_offset + column_byte_offset + byte_offset), guard); } } } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIteratorStridedDgrad &operator++() { ++state_[0]; thread_start_row_ += ThreadMap::Shape::kRow; if (state_[0] == ThreadMap::Count::kRow) { state_[0] = 0; ++state_[1]; thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; if (state_[1] == ThreadMap::Count::kGroup) { state_[1] = 0; ++state_[2]; thread_start_row_ += ThreadMap::Count::kGroup * 
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow; if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; } } } return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
15,536
C
31.36875
137
0.618177
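The strided-dgrad tile iterator above remaps each linear NPQ row offset into an NHW row of Dx: it decomposes the offset into (n, p, q), then rebuilds the row index from the per-filter-position starting coordinates (start_h_, start_w_) and the convolution strides. The standalone sketch below reproduces exactly that index math on the host; the problem dimensions are arbitrary example values, not taken from the source.

// Standalone reproduction of the STEP 4.a row remapping in load/store_with_byte_offset.
#include <cstdio>

int main() {
  // Example problem (assumed): H x W output Dx, stride 2x2, one (r, s) filter position.
  const int H = 8, W = 8;
  const int stride_h = 2, stride_w = 2;
  const int start_h = 1, start_w = 1;  // starting Dx coordinates for this (r, s)

  // Effective P, Q for this filter position (same ceiling division as the iterator constructor).
  const int p_ = (H - start_h + stride_h - 1) / stride_h;
  const int q_ = (W - start_w + stride_w - 1) / stride_w;

  // Map a few NPQ row offsets to NHW row offsets.
  for (int npq_offset = 0; npq_offset < 6; ++npq_offset) {
    int n        = npq_offset / (p_ * q_);
    int residual = npq_offset % (p_ * q_);
    int p        = residual / q_;
    int q        = residual % q_;

    int mapped_row_offset = n * (H * W)
                          + (start_h + p * stride_h) * W
                          + (start_w + q * stride_w);

    std::printf("npq=%d -> n=%d p=%d q=%d -> nhw row %d\n",
                npq_offset, n, p, q, mapped_row_offset);
  }
  return 0;
}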
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_planar_complex.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/array_planar_complex.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator for planar-complex output representations. /// /// Note, as with most CUTLASS components for planar complex, the template arguments describe /// the underlying real data type. 
template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator typename Padding_ ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) > class EpiloguePlanarComplex { public: using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; /// Output layout is always row-major using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = ArrayPlanarComplex< typename WarpMmaOperator::FragmentC::Element, WarpMmaOperator::FragmentC::kElements >; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Shape of each warp-level operation using WarpShape = typename WarpMmaOperator::Shape; /// Number of warps using WarpCount = gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, kPartitionsK >; /// Shared memory allocation struct SharedStorage { // // Type definitions // /// Element type of shared memory using Element = typename WarpTileIterator::Element; /// Tensor reference to shared memory allocation using TensorRef = typename WarpTileIterator::TensorRef; /// Layout of shared memory allocation using Layout = typename WarpTileIterator::Layout; /// Logical shape of the shared memory tile written to by all warps. 
using Shape = MatrixShape< WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK, WarpCount::kN * WarpTileIterator::Shape::kColumn >; /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape< Shape::kRow + Padding::kRow, Shape::kColumn + Padding::kColumn >; static int const kImaginaryStride = StorageShape::kCount; // // Data members // AlignedBuffer<Element, kImaginaryStride * 2> storage; // // Methods // /// Returns a pointer to the shared memory buffer CUTLASS_DEVICE Element *data() { return storage.data(); } /// Returns a tensor reference to the shared memory buffer CUTLASS_DEVICE TensorRef reference() { return TensorRef( storage.data(), Layout::packed({StorageShape::kRow, StorageShape::kColumn})); } }; private: // // Data members // SharedStorage &shared_storage_; /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Stores a warp's fragment of accumulators to SMEM WarpTileIterator warp_tile_iterator_; public: /// Constructor CUTLASS_DEVICE EpiloguePlanarComplex( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): shared_storage_(shared_storage), shared_load_iterator_(shared_storage.reference(), thread_idx), warp_tile_iterator_(shared_storage.reference(), lane_idx) { // Compute warp location within threadblock tile by mapping the warp_id to three coordinates: // // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN); int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN); int warp_m = warp_mn % WarpCount::kM; int warp_n = warp_mn / WarpCount::kM; MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n}; warp_tile_iterator_.add_tile_offset(warp_offset); } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator OutputTileIterator destination_iterator_real, ///< Tile iterator for destination OutputTileIterator destination_iterator_imag, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator_real, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) OutputTileIterator source_iterator_imag) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) typename OutputTileIterator::Fragment source_fragment_real; typename OutputTileIterator::Fragment source_fragment_imag; if (!output_op.is_source_needed()) { source_iterator_real.clear_mask(); source_iterator_imag.clear_mask(); } source_fragment_real.clear(); source_fragment_imag.clear(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator_real(accumulators.real); AccumulatorFragmentIterator accum_fragment_iterator_imag(accumulators.imag); // // Iterate over accumulator tile // CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // source_iterator_real.load(source_fragment_real); source_iterator_imag.load(source_fragment_imag); ++source_iterator_real; ++source_iterator_imag; // // Convert and store fragment // __syncthreads(); typename 
AccumulatorFragmentIterator::Fragment accum_fragment_real; typename AccumulatorFragmentIterator::Fragment accum_fragment_imag; accum_fragment_iterator_real.load(accum_fragment_real); accum_fragment_iterator_imag.load(accum_fragment_imag); ++accum_fragment_iterator_real; ++accum_fragment_iterator_imag; this->warp_tile_iterator_.store(accum_fragment_real); this->warp_tile_iterator_.store_with_pointer_offset(accum_fragment_imag, SharedStorage::kImaginaryStride); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment_real[kPartitionsK]; typename SharedLoadIterator::Fragment aligned_accum_fragment_imag[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment_real[0]); shared_load_iterator_.load_with_pointer_offset(aligned_accum_fragment_imag[0], SharedStorage::kImaginaryStride); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices static_assert(kPartitionsK == 1, "Sliced-K not supported for planar complex at this time"); // // Compute the output result // typename OutputTileIterator::Fragment output_fragment_real; typename OutputTileIterator::Fragment output_fragment_imag; apply_output_operator_( output_fragment_real, output_fragment_imag, output_op, aligned_accum_fragment_real[0], aligned_accum_fragment_imag[0], source_fragment_real, source_fragment_imag); // // Store the final result // destination_iterator_real.store(output_fragment_real); destination_iterator_imag.store(output_fragment_imag); ++destination_iterator_real; ++destination_iterator_imag; } } private: /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_( typename OutputTileIterator::Fragment &output_fragment_real, typename OutputTileIterator::Fragment &output_fragment_imag, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment_real, typename SharedLoadIterator::Fragment const &aligned_accum_fragment_imag, typename OutputTileIterator::Fragment const &source_fragment_real, typename OutputTileIterator::Fragment const &source_fragment_imag) { OutputAccessType *output_frag_real_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment_real); OutputAccessType *output_frag_imag_ptr = reinterpret_cast<OutputAccessType *>(&output_fragment_imag); AccumulatorAccessType const *compute_frag_real_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_real); AccumulatorAccessType const *compute_frag_imag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_imag); OutputAccessType const *source_frag_real_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment_real); OutputAccessType const *source_frag_imag_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment_imag); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator auto result_fragment = output_op( make_ArrayPlanarComplex(compute_frag_real_ptr[i], compute_frag_imag_ptr[i]), make_ArrayPlanarComplex(source_frag_real_ptr[i], source_frag_imag_ptr[i]) ); output_frag_real_ptr[i] = result_fragment.real; output_frag_imag_ptr[i] = result_fragment.imag; } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass 
////////////////////////////////////////////////////////////////////////////////
14,610
C
35.345771
128
0.680767
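EpiloguePlanarComplex above keeps real and imaginary parts in separate fragments (and at a fixed imaginary stride in shared memory), presents each access vector to the output functor as a planar-complex pair, and splits the result back into separate real and imaginary output fragments. The following is a minimal host sketch of that split/recombine pattern using std::complex; the scale factor, functor semantics, and fragment length are placeholder assumptions rather than the CUTLASS output operator.

// Host sketch of the planar-complex apply/split pattern.
#include <complex>
#include <cstdio>

int main() {
  constexpr int kElements = 4;                     // assumed fragment length
  float accum_real[kElements], accum_imag[kElements];
  float src_real[kElements],   src_imag[kElements];
  float out_real[kElements],   out_imag[kElements];

  for (int i = 0; i < kElements; ++i) {
    accum_real[i] = 1.0f; accum_imag[i] = 2.0f;
    src_real[i]   = 0.5f; src_imag[i]   = -0.5f;
  }

  const std::complex<float> alpha(2.0f, 0.0f);     // example scale; source added unscaled (assumption)

  // Per-element (per access vector in the real epilogue) application of the functor.
  for (int i = 0; i < kElements; ++i) {
    std::complex<float> accum(accum_real[i], accum_imag[i]);
    std::complex<float> source(src_real[i], src_imag[i]);
    std::complex<float> z = alpha * accum + source;
    out_real[i] = z.real();                        // stored via the real destination iterator
    out_imag[i] = z.imag();                        // stored via the imaginary destination iterator
  }

  std::printf("out[0] = (%f, %f)\n", out_real[0], out_imag[0]);
  return 0;
}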
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_reduction.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/fast_math.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator with reduction over each column template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands typename ElementVector_, ///< Pointer to reduction vector typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator typename ReductionOp_, ///< Reduction operator typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large (!IsEpilogueFunctorHeavy<OutputOp_>::value) > class EpilogueWithReduction : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_> { public: using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_>; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using TensorTileIterator = TensorTileIterator_; using ElementVector = ElementVector_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using ReductionOp = ReductionOp_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; static bool const kIsSingleSource = true; /// The complete warp-level accumulator tile using AccumulatorTile = typename Base::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Compute data type produced by the output op using ElementCompute = typename OutputOp::ElementCompute; /// Compute fragment using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>; /// Thread map used by output tile iterators using ThreadMap = typename OutputTileIterator::ThreadMap; /// Fragment object used in reduction using ReductionFragment = Array< 
ElementAccumulator, ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Data type of additional tensor using ElementTensor = typename TensorTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>; /// Tensor access type using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = typename Base::WarpCount; /// Shared memory allocation from epilogue base class using BaseSharedStorage = typename Base::SharedStorage; /// Used for the reduction struct ReductionDetail { /// If true, accumulator coordinates are computed and out-of-bounds checks are enabled when /// performing the reduction. static bool const kOobCheck = false; /// Number of threads per warp static int const kWarpSize = 32; /// Number of distinct scalar column indices handled by each thread static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess; /// Number of distinct scalar row indices handled by each thread static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn; /// Number of threads per threadblock static int const kThreadCount = kWarpSize * WarpCount::kCount; /// Number of distinct threads per row of output tile static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread); /// Number of distinct threads which must be reduced during the final reduction phase within the threadblock. static int const kThreadRows = kThreadCount / kThreadsPerRow; /// I'm not sure what I meant here. 
static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount); /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape< kThreadRows, Shape::kN >; /// Debug printing CUTLASS_DEVICE static void print() { #if 0 printf("ReductionDetail {\n"); printf( " kElementsPerAccess: %d\nkColumnsPerThread: %d\nkRowsPerThread: %d\nkThreadCount: %d\nkThreadsPerRow: %d\n" "kThreadRows: %d\nkThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n", kElementsPerAccess, kColumnsPerThread, kRowsPerThread, kThreadCount, kThreadsPerRow, kThreadRows, kThreadAccessesPerRow, StorageShape::kRow, StorageShape::kColumn, StorageShape::kCount ); printf("};\n"); #endif } }; /// Shared storage structure (shadows base) with additional SMEM buffer for reduction struct SharedStorage { union { BaseSharedStorage base; AlignedArray<ElementAccumulator, ReductionDetail::StorageShape::kCount, 16> reduction; ///< Shared storage for reduction }; CUTLASS_HOST_DEVICE SharedStorage() { } }; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Shared memory pointer for reduction ElementAccumulator *reduction_ptr_; /// Thread index within the threadblock int thread_idx_; public: /// Constructor CUTLASS_DEVICE EpilogueWithReduction( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< ID of thread within warp ): Base(shared_storage.base, thread_idx, warp_idx, lane_idx), shared_load_iterator_(shared_storage.base.reference(), thread_idx), reduction_ptr_(shared_storage.reduction.data()), thread_idx_(thread_idx) { } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator ElementVector * reduction_output_ptr, ///< Reduction output vector OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord(Shape::kM, Shape::kN), MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space MatrixCoord()) { ReductionFragment reduction_fragment; reduction_fragment.clear(); if (!output_op.is_source_needed()) { compute_source_not_needed_( output_op, reduction_fragment, destination_iterator, accumulators, tensor_iterator, problem_size, threadblock_offset); } else { compute_source_needed_( output_op, reduction_fragment, destination_iterator, accumulators, source_iterator, tensor_iterator, problem_size, threadblock_offset); } if (output_op.participates_in_reduction()) { reduction_(problem_size, threadblock_offset, reduction_output_ptr, reduction_fragment); } } private: /// Perform the reduction 
CUTLASS_DEVICE void reduction_( MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord const &threadblock_offset, ///< Threadblock's initial offset within the problem size space ElementVector * reduction_output_ptr, ///< Reduction output vector ReductionFragment const & reduction_fragment) { // // Store the partially reduced value to SMEM // // Guard against uses of the existing SMEM tile __syncthreads(); using AccessType = AlignedArray<ElementAccumulator, ThreadMap::kElementsPerAccess>; // // Determine a compacted thread arrangement to store to SMEM. // int const kThreadsPerRow = Shape::kN / (ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess); MatrixCoord thread_offset( thread_idx_ / kThreadsPerRow, (thread_idx_ % kThreadsPerRow) * ThreadMap::kElementsPerAccess); // // Each thread stores its fragment to SMEM // AccessType *aligned_reduction_ptr = reinterpret_cast<AccessType *>( &reduction_ptr_[thread_offset.row() * Shape::kN + thread_offset.column()]); AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&reduction_fragment); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int col_idx = column * ThreadMap::Delta::kColumn / ThreadMap::kElementsPerAccess; aligned_reduction_ptr[col_idx] = frag_ptr[column]; } __syncthreads(); // // Now, threads are assigned several columns of the output. They fetch over all rows from // the compacted SMEM tile and perform a reduction. // CUTLASS_PRAGMA_UNROLL for (int j = 0; j < ReductionDetail::kThreadAccessesPerRow; ++j) { int column_idx = thread_idx_ + j * ReductionDetail::kThreadCount; ReductionOp reduction_op; ElementAccumulator reduction_element = ElementAccumulator(); int output_column_idx = threadblock_offset.column() + column_idx; if (column_idx < Shape::kN && output_column_idx < problem_size.column()) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ReductionDetail::kThreadRows; ++row) { if (row) { auto frag = reduction_ptr_[row * Shape::kN + column_idx]; reduction_element = reduction_op(reduction_element, frag); } else { reduction_element = reduction_ptr_[column_idx]; } } // Store reduction_output_ptr[column_idx] = ElementVector(reduction_element); } } } template<class Seq> struct acc2smem; template <size_t... 
Seq> struct acc2smem<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp const &output_op, ///< Output operator ReductionFragment &reduction_fragment, ///< Fragment containing the accumulated partial reduction over columns OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space ) { // // Iterator over warp-level accumulator fragment // typename TensorTileIterator::Fragment tensor_fragment; tensor_fragment.clear(); AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Convert and store fragment // tensor_iterator.load(tensor_fragment); ++tensor_iterator; __syncthreads(); acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push( iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); // // If the number of k-slices is > 1 - perform a reduction amongst the k-slices // if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_tile_offset({tile_row_offset , 0}); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0}); } // // Compute the output result // FragmentCompute compute_fragment; apply_output_operator_source_not_needed_( reduction_fragment, compute_fragment, output_op, aligned_accum_fragment[0], tensor_fragment, destination_iterator); // // Store the final result // NumericArrayConverter<ElementOutput, ElementCompute, FragmentCompute::kElements> converter; typename OutputTileIterator::Fragment output_fragment = converter(compute_fragment); destination_iterator.store(output_fragment); ++destination_iterator; } } /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp const &output_op, ///< Output operator ReductionFragment &reduction_fragment, ///< Fragment containing the accumulated partial 
reduction over columns OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix TensorTileIterator tensor_iterator, ///< Threadblock tile iterator for additional tensor operand MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space ) { typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); typename TensorTileIterator::Fragment tensor_fragment; tensor_fragment.clear(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // source_fragment.clear(); source_iterator.load(source_fragment); ++source_iterator; tensor_iterator.load(tensor_fragment); ++tensor_iterator; // // Convert and store fragment // __syncthreads(); acc2smem<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push( iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_tile_offset({tile_row_offset , 0}); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0}); } // // Compute the output result // FragmentCompute compute_fragment; apply_output_operator_( reduction_fragment, compute_fragment, output_op, aligned_accum_fragment[0], source_fragment, tensor_fragment, destination_iterator); // // Convert and store the final result // NumericArrayConverter<ElementOutput, ElementCompute, FragmentCompute::kElements> converter; typename OutputTileIterator::Fragment output_fragment = converter(compute_fragment); destination_iterator.store(output_fragment); ++destination_iterator; } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_( ReductionFragment &reduction_fragment, FragmentCompute &compute_fragment, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment, typename OutputTileIterator::Fragment const &source_fragment, typename TensorTileIterator::Fragment const &tensor_fragment, OutputTileIterator const & destination_iterator) { ComputeAccessType *compute_frag_ptr = reinterpret_cast<ComputeAccessType *>(&compute_fragment); AccumulatorAccessType const *accum_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); OutputAccessType const *source_frag_ptr = reinterpret_cast<OutputAccessType const *>(&source_fragment); TensorAccessType const 
*tensor_frag_ptr = reinterpret_cast<TensorAccessType const *>(&tensor_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator compute_frag_ptr[i] = output_op(accum_frag_ptr[i], source_frag_ptr[i], tensor_frag_ptr[i]); } // // Partial reduction over each column // ReductionOp reduction_op; typename OutputTileIterator::Mask mask; destination_iterator.get_mask(mask); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ReductionDetail::kColumnsPerThread; ++column) { int column_vector_idx = column / ThreadMap::kElementsPerAccess; bool column_guard = mask.predicates[column_vector_idx]; CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ReductionDetail::kRowsPerThread; ++row) { bool fetch; if (ReductionDetail::kOobCheck) { int row_idx = (row % ThreadMap::Iterations::kRow); int residual = (row / ThreadMap::Iterations::kRow); int group_idx = (residual % ThreadMap::Iterations::kGroup); residual = (residual / ThreadMap::Iterations::kGroup); int cluster_idx = (residual % ThreadMap::Iterations::kCluster); int row_offset = row_idx * ThreadMap::Delta::kRow + group_idx * ThreadMap::Delta::kGroup + cluster_idx * ThreadMap::Delta::kCluster; int output_row = destination_iterator.thread_start_row() + row_offset; fetch = (output_row < destination_iterator.extent_row() && column_guard); } else { fetch = true; } ElementCompute value = ElementCompute(); if (fetch) { value = compute_fragment[row * ReductionDetail::kColumnsPerThread + column]; } reduction_fragment[column] = reduction_op( reduction_fragment[column], value); } } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_source_not_needed_( ReductionFragment &reduction_fragment, FragmentCompute &compute_fragment, OutputOp const &output_op, ///< Output operator typename SharedLoadIterator::Fragment const &aligned_accum_fragment, typename TensorTileIterator::Fragment const &tensor_fragment, OutputTileIterator const & destination_iterator ) { ComputeAccessType *compute_frag_ptr = reinterpret_cast<ComputeAccessType *>(&compute_fragment); AccumulatorAccessType const *accum_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment); TensorAccessType const *tensor_frag_ptr = reinterpret_cast<TensorAccessType const *>(&tensor_fragment); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operator compute_frag_ptr[i] = output_op(accum_frag_ptr[i], tensor_frag_ptr[i]); } // // Partial reduction over each column // ReductionOp reduction_op; typename OutputTileIterator::Mask mask; destination_iterator.get_mask(mask); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ReductionDetail::kColumnsPerThread; ++column) { int column_vector_idx = column / ThreadMap::kElementsPerAccess; bool column_guard = mask.predicates[column_vector_idx]; CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ReductionDetail::kRowsPerThread; ++row) { bool fetch; if (ReductionDetail::kOobCheck) { int row_idx = (row % ThreadMap::Iterations::kRow); int residual = (row / ThreadMap::Iterations::kRow); int group_idx = (residual % ThreadMap::Iterations::kGroup); residual = (residual / ThreadMap::Iterations::kGroup); int cluster_idx = (residual % ThreadMap::Iterations::kCluster); int row_offset = row_idx * 
ThreadMap::Delta::kRow + group_idx * ThreadMap::Delta::kGroup + cluster_idx * ThreadMap::Delta::kCluster; int output_row = destination_iterator.thread_start_row() + row_offset; fetch = (output_row < destination_iterator.extent_row() && column_guard); } else { fetch = true; } ElementCompute value = ElementCompute(); if (fetch) { value = compute_fragment[row * ReductionDetail::kColumnsPerThread + column]; } reduction_fragment[column] = reduction_op( reduction_fragment[column], value); } } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
29,199
C
34.436893
129
0.661666
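// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the CUTLASS header above). It models, on
// the host and with plain std::vector, the two-phase column reduction that
// EpilogueWithReduction performs on the device: each "thread" first folds its
// rows of the tile into one partial value per column, the partials are staged
// in a scratch buffer (SMEM in the real kernel), and a final pass folds the
// partial rows into a single reduction vector. All names and sizes here are
// hypothetical; only the data flow mirrors the header.
// ---------------------------------------------------------------------------
#include <vector>
#include <cstdio>

int main() {
  int const kRows = 8, kColumns = 16, kThreadRows = 2;   // hypothetical tile
  std::vector<float> tile(kRows * kColumns, 1.0f);       // accumulator tile

  // Phase 1: each group of rows (one "thread row") produces a partial sum per
  // column, analogous to the per-thread ReductionFragment.
  std::vector<float> partial(kThreadRows * kColumns, 0.0f);
  for (int r = 0; r < kRows; ++r) {
    int owner = r % kThreadRows;                          // owning partial row
    for (int c = 0; c < kColumns; ++c) {
      partial[owner * kColumns + c] += tile[r * kColumns + c];
    }
  }

  // Phase 2: fold the partial rows into the final per-column reduction,
  // analogous to the sweep over ReductionDetail::kThreadRows in reduction_().
  std::vector<float> reduced(kColumns, 0.0f);
  for (int c = 0; c < kColumns; ++c) {
    for (int pr = 0; pr < kThreadRows; ++pr) {
      reduced[c] += partial[pr * kColumns + c];
    }
  }

  std::printf("column 0 reduction: %f (expected %d)\n", reduced[0], kRows);
  return 0;
}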
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_relu0.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_hardswish.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h" #include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/threadblock/shared_load_iterator_mixed.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/threadblock/interleaved_epilogue.h" #include "cutlass/layout/permute.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// namespace detail { template < typename ElementOutput, typename ElementAccumulator, int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, ElementAccumulator, layout::RowMajor >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, ElementAccumulator >; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for float <= float x 4 template < typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp<float, float, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, float, layout::RowMajor >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, float >; static int const kFragmentsPerIteration = 2; }; /// Partial specialization for int32_t <= int32_t x 4 template < typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp<int32_t, int32_t, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, int32_t, layout::RowMajor >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, int32_t >; static int const kFragmentsPerIteration = 1; }; /// 
Partial specialization for float <= int32_t x 4 template < typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp<float, int32_t, 4, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, int32_t, layout::RowMajor >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, int32_t >; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for half <= float x 8 epilogues avoids shared memory bank conflicts. template < typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< half_t, float, 8, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, float, 32, 16, 8, 8 >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, float, 32, 16, 8, 8 >; static int const kFragmentsPerIteration = 2; }; /// Partial specialization for int8/int4b_t <= int32 x 16/8 epilogues avoids shared memory bank conflicts. /// Threadblock::kN = 256 still has bank conflicts. template < typename ElementOutput, int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< ElementOutput, int32_t, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { static_assert(platform::is_same<ElementOutput, cutlass::int4b_t>::value || platform::is_same<ElementOutput, cutlass::uint4b_t>::value || platform::is_same<ElementOutput, int8_t>::value || platform::is_same<ElementOutput, uint8_t>::value, "ElementOutput needs to be 4 or 8 bit (unsigned) int."); static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8), "ElementsPerAccess needs to be 16 or 8."); using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, int32_t, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, int32_t, layout::RowMajor >; using WarpTileIterator = typename platform::conditional< (ThreadblockShape::kN == 256), WarpTileIteratorNotMixed, WarpTileIteratorMixed>::type; using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, int32_t, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, int32_t >; using SharedLoadIterator = typename platform::conditional< (ThreadblockShape::kN == 256), SharedLoadIteratorNotMixed, SharedLoadIteratorMixed>::type; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for float_e4m3_t <= float x 16/8 epilogues avoids shared memory bank conflicts. /// Threadblock::kN = 256 still has bank conflicts. 
template < int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< cutlass::float_e4m3_t, float, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using ElementOutput = cutlass::float_e4m3_t; static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8), "ElementsPerAccess needs to be 16 or 8."); using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, float, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, float, layout::RowMajor >; using WarpTileIterator = typename platform::conditional< (ThreadblockShape::kN == 256), WarpTileIteratorNotMixed, WarpTileIteratorMixed>::type; using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, float, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, float >; using SharedLoadIterator = typename platform::conditional< (ThreadblockShape::kN == 256), SharedLoadIteratorNotMixed, SharedLoadIteratorMixed>::type; static int const kFragmentsPerIteration = 1; }; /// Partial specialization for float_e5m2_t <= float x 16/8 epilogues avoids shared memory bank conflicts. /// Threadblock::kN = 256 still has bank conflicts. template < int ElementsPerAccess, typename ThreadblockShape, typename WarpShape, typename InstructionShape, typename ThreadMap > struct DefaultIteratorsTensorOp< cutlass::float_e5m2_t, float, ElementsPerAccess, ThreadblockShape, WarpShape, InstructionShape, ThreadMap> { using ElementOutput = cutlass::float_e5m2_t; static_assert((ElementsPerAccess == 16 || ElementsPerAccess == 8), "ElementsPerAccess needs to be 16 or 8."); using WarpTileIteratorMixed = cutlass::epilogue::warp::TileIteratorTensorOpMixed< WarpShape, InstructionShape, float, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using WarpTileIteratorNotMixed = cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, float, layout::RowMajor >; using WarpTileIterator = typename platform::conditional< (ThreadblockShape::kN == 256), WarpTileIteratorNotMixed, WarpTileIteratorMixed>::type; using SharedLoadIteratorMixed = cutlass::epilogue::threadblock::SharedLoadIteratorMixed< ThreadMap, float, 32, cutlass::sizeof_bits<ElementOutput>::value, ElementsPerAccess, 8 >; using SharedLoadIteratorNotMixed = cutlass::epilogue::threadblock::SharedLoadIterator< ThreadMap, float >; using SharedLoadIterator = typename platform::conditional< (ThreadblockShape::kN == 256), SharedLoadIteratorNotMixed, SharedLoadIteratorMixed>::type; static int const kFragmentsPerIteration = 1; }; } // namespace detail //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; static bool const UseCUDAStore = platform::is_same<ElementOutput, double>::value; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout, UseCUDAStore >; using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value, cutlass::epilogue::warp::FragmentIteratorComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC>, cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC> >::type; /// Support several implementations depending on structure of epilogue using DefaultIterators = detail::DefaultIteratorsTensorOp< ElementOutput, ElementAccumulator, kElementsPerAccess, Shape, typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename OutputTileThreadMap::CompactedThreadMap >; using WarpTileIterator = typename DefaultIterators::WarpTileIterator; using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator; /// Hard-coded padding elements added using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>; static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1); // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding, kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueTensorOpStridedDgrad { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad< OutputTileThreadMap, ElementOutput >; using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value, cutlass::epilogue::warp::FragmentIteratorComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC>, cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC> >::type; /// Support several implementations depending on structure of epilogue using DefaultIterators = detail::DefaultIteratorsTensorOp< ElementOutput, ElementAccumulator, kElementsPerAccess, Shape, typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename OutputTileThreadMap::CompactedThreadMap >; using WarpTileIterator = typename DefaultIterators::WarpTileIterator; using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator; /// Hard-coded padding elements added using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>; static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1); // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding, kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < int Rank, typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpilogueTensorOpAffineRankN { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN< OutputTileThreadMap, ElementOutput, Rank >; // Map to the row major iterator since the iterator selection for affineN is the same. using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value, cutlass::epilogue::warp::FragmentIteratorComplexTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, layout::RowMajor>, cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, layout::RowMajor> >::type; /// Support several implementations depending on structure of epilogue using DefaultIterators = detail::DefaultIteratorsTensorOp< ElementOutput, ElementAccumulator, kElementsPerAccess, Shape, typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename OutputTileThreadMap::CompactedThreadMap >; using WarpTileIterator = typename DefaultIterators::WarpTileIterator; using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator; /// Hard-coded padding elements added using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>; static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1); // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding, kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps which use an /// interleaved output layout. For this case, shared memory is not needed. 
template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, int InterleavedK, bool isSplitK = false> struct DefaultInterleavedEpilogueTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock:: DefaultInterleavedThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, InterleavedK>::Type; using OutputTileIterator = cutlass::epilogue::threadblock::InterleavedPredicatedTileIterator< OutputTileThreadMap, ElementOutput, InterleavedK>; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC>; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, OutputOp, InterleavedK>; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps which use an /// interleaved output layout. For this case, shared memory is not needed. template <typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, int InterleavedK, bool isSplitK = false> struct DefaultInterleavedConvEpilogue { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock:: DefaultInterleavedConvThreadMapTensorOp< Shape, typename WarpMmaTensorOp::Shape, kPartitionsK, ElementOutput, kElementsPerAccess, InterleavedK>::Type; using OutputTileIterator = cutlass::epilogue::threadblock::InterleavedConvPredicatedTileIterator< OutputTileThreadMap, ElementOutput, InterleavedK>; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, // can reuse the gemm version here to do element selection layout::ColumnMajorInterleaved<InterleavedK>>; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::InterleavedEpilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, OutputOp, InterleavedK>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
27,150
C
32.561187
112
0.675212
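// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the CUTLASS header above). The
// DefaultIteratorsTensorOp specializations above pick between a "mixed" and a
// "non-mixed" iterator at compile time with platform::conditional, keyed on
// ThreadblockShape::kN == 256. This standalone snippet shows the same
// selection pattern with std::conditional; the iterator types here are
// hypothetical stand-ins, not the CUTLASS classes.
// ---------------------------------------------------------------------------
#include <type_traits>
#include <cstdio>

struct MixedIterator    { static constexpr const char *name() { return "mixed"; } };
struct NotMixedIterator { static constexpr const char *name() { return "not-mixed"; } };

template <int ThreadblockN>
struct PickIterator {
  // kN == 256 keeps the plain iterator; other tile widths use the
  // bank-conflict-avoiding "mixed" variant, mirroring the header's choice.
  using type = typename std::conditional<(ThreadblockN == 256),
                                         NotMixedIterator,
                                         MixedIterator>::type;
};

int main() {
  std::printf("kN=128 -> %s\n", PickIterator<128>::type::name());  // mixed
  std::printf("kN=256 -> %s\n", PickIterator<256>::type::name());  // not-mixed
  return 0;
}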
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "predicated_tile_iterator.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/pitch_linear.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename Element_, int ElementsPerAccess > struct DefaultThreadMapTensorOp { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using Element = Element_; static int const kElementsPerAccess = ElementsPerAccess; // // Definitions // struct Detail { /// Tensor Operations fundamentally perform operations on 8 rows static int const kTensorOpRows = 8; static int const kWarpSize = 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap < OutputTileShape<ThreadblockShape::kN, Detail::kTensorOpRows, Detail::WarpCount::kM, 1, 1>, OutputTileShape<1, WarpShape::kM / Detail::kTensorOpRows, 1, 1, WarpShape::kM / Detail::kTensorOpRows>, Detail::kThreads, kElementsPerAccess, sizeof_bits<Element>::value >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template <typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename Element_, int ElementsPerAccess, int InterleavedK> struct DefaultInterleavedThreadMapTensorOp { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using Element = Element_; static int const kElementsPerAccess = ElementsPerAccess; static int const kInterleavedK = InterleavedK; // // Definitions // struct Detail { /// Tensor Operations fundamentally perform operations on 8 rows static int const kTensorOpRows = 8; static int const kWarpSize = 32; static_assert(!(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape<ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK>; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept /// InterleavedOutputTileThreadMap using Type = InterleavedOutputTileThreadMap< layout::PitchLinearShape<Detail::WarpCount::kM, Detail::WarpCount::kN>, layout::PitchLinearShape<WarpShape::kM / Detail::kTensorOpRows, WarpShape::kN / InterleavedK>, Detail::kThreads, kElementsPerAccess, sizeof_bits<Element>::value>; }; //////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template <typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename Element_, int ElementsPerAccess, int 
InterleavedK> struct DefaultInterleavedConvThreadMapTensorOp { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using Element = Element_; static int const kElementsPerAccess = ElementsPerAccess; static int const kInterleavedK = InterleavedK; // // Definitions // struct Detail { /// Tensor Operations fundamentally perform operations on 8 rows static int const kTensorOpRows = 8; static int const kWarpSize = 32; static_assert(!(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape<ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK>; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; }; // // ThreadMap // /// ThreadMap to be used by epilogue::MaskedTileIterator satisfying concept /// InterleavedOutputTileThreadMap using Type = InterleavedConvOutputTileThreadMap< MatrixShape<Detail::WarpCount::kM, Detail::WarpCount::kN>, MatrixShape<WarpShape::kM / Detail::kTensorOpRows, WarpShape::kN / InterleavedK>, Detail::kThreads, kElementsPerAccess, sizeof_bits<Element>::value>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
7,398
C
34.401914
107
0.649365
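// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the CUTLASS header above). It spells out
// the arithmetic in DefaultThreadMapTensorOp::Detail for one hypothetical
// configuration: a 128x128 threadblock tile covered by 64x64 warp tiles with
// a single K partition. WarpCount and kThreads are recomputed with plain
// constexpr ints and checked at compile time.
// ---------------------------------------------------------------------------
constexpr int kThreadblockM = 128, kThreadblockN = 128;  // hypothetical tile
constexpr int kWarpM = 64, kWarpN = 64;                  // warp-level tile
constexpr int kPartitionsK = 1;
constexpr int kWarpSize = 32;

constexpr int kWarpCountM = kThreadblockM / kWarpM;                    // 2
constexpr int kWarpCountN = kThreadblockN / kWarpN;                    // 2
constexpr int kWarpCount  = kWarpCountM * kWarpCountN * kPartitionsK;  // 4
constexpr int kThreads    = kWarpCount * kWarpSize;                    // 128

static_assert(kWarpCount == 4 && kThreads == 128,
              "A 128x128 tile with 64x64 warps yields 4 warps / 128 threads");

int main() { return 0; }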
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using WMMA. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/warp/fragment_iterator_wmma_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h" #include "cutlass/epilogue/threadblock/default_thread_map_wmma_tensor_op.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/layout/permute.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for WMMA TensorOps. 
template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueWmmaTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapWmmaTensorOp< Shape, typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorWmmaTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorWmmaTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
5,947
C
34.831325
100
0.702707
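// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the CUTLASS header above).
// DefaultEpilogueWmmaTensorOp wires together the same staged data flow the
// other default epilogues use: warp accumulators are staged through shared
// memory, reloaded in an output-friendly order, run through the output op
// (here a plain alpha * accum + beta * source linear combination), and then
// written out. This host-side model uses plain arrays; every name and size is
// hypothetical and only the staging order mirrors the real epilogue.
// ---------------------------------------------------------------------------
#include <vector>
#include <cstdio>

int main() {
  int const kElements = 8;                       // hypothetical fragment size
  float const alpha = 2.0f, beta = 1.0f;

  std::vector<float> accum(kElements, 3.0f);     // warp accumulator fragment
  std::vector<float> source(kElements, 1.0f);    // source tensor C fragment
  std::vector<float> staged(kElements);          // stand-in for SMEM staging
  std::vector<float> output(kElements);          // destination fragment D

  // Stage accumulators (WarpTileIterator::store then SharedLoadIterator::load).
  for (int i = 0; i < kElements; ++i) staged[i] = accum[i];

  // Output operator: D = alpha * accum + beta * C (LinearCombination-style).
  for (int i = 0; i < kElements; ++i) output[i] = alpha * staged[i] + beta * source[i];

  std::printf("output[0] = %f (expected 7)\n", output[0]);  // 2*3 + 1*1
  return 0;
}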
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_visitor_with_softmax.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue visitor for threadblock scoped GEMMs that process softmax computations in epilogue. The epilogue finds max values in each row of the row-major output matrix and stores them. The max values are also used for a further round of threadblock scoped reduction operation, where the partial reduction results are stored in a pre-allocated array and used for further full reduction. 
*/ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// #include "cutlass/cutlass.h" #include "cutlass/arch/memory.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/numeric_conversion.h" #include "cutlass/fast_math.h" namespace cutlass { namespace epilogue { namespace threadblock { template < typename ThreadblockShape_, int ThreadCount, typename OutputTileIterator_, typename ElementAccumulator_, typename ElementNorm_, typename ElementSum_, typename ElementSoftmaxCompute_, typename ElementwiseFunctor_, bool UseMasking_ = false > class EpilogueVisitorSoftmax { public: using ThreadblockShape = ThreadblockShape_; static int const kThreadCount = ThreadCount; using OutputTileIterator = OutputTileIterator_; using ElementwiseFunctor = ElementwiseFunctor_; static int const kIterations = OutputTileIterator::kIterations; static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; using ElementOutput = typename OutputTileIterator::Element; using LayoutOutput = cutlass::layout::RowMajor; using ElementAccumulator = ElementAccumulator_; using ElementNorm = ElementNorm_; using ElementSum = ElementSum_; using ElementSoftmaxCompute = ElementSoftmaxCompute_; using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>; using SoftmaxFragment = Array<ElementSoftmaxCompute, kElementsPerAccess>; using OutputVector = Array<ElementOutput, kElementsPerAccess>; using TensorRefD = TensorRef<ElementOutput, LayoutOutput>; static int const kThreadsPerRow = OutputTileIterator::ThreadMap::Detail::kAccessWidth; static bool const kHasMultiStepsInRow = (OutputTileIterator::ThreadMap::Iterations::kColumn > 1); static bool const kUseMasking = UseMasking_; /// Argument structure struct Arguments { typename ElementwiseFunctor::Params elementwise; int64_t batch_stride_C; int64_t batch_stride_D; int64_t batch_stride_Max; int64_t batch_stride_Sum; // // Methods // Arguments(): batch_stride_C(0), batch_stride_D(0), batch_stride_Max(0), batch_stride_Sum(0) { } Arguments( typename ElementwiseFunctor::Params elementwise_ ): elementwise(elementwise_), batch_stride_C(0), batch_stride_D(0), batch_stride_Max(0), batch_stride_Sum(0) { } Arguments( typename ElementwiseFunctor::Params elementwise_, int64_t batch_stride_C_, int64_t batch_stride_D_, int64_t batch_stride_Max_, int64_t batch_stride_Sum_ ): elementwise(elementwise_), batch_stride_C(batch_stride_C_), batch_stride_D(batch_stride_D_), batch_stride_Max(batch_stride_Max_), batch_stride_Sum(batch_stride_Sum_) { } }; struct Params { typename ElementwiseFunctor::Params elementwise; int64_t batch_stride_C; int64_t batch_stride_D; int64_t batch_stride_Max; int64_t batch_stride_Sum; // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Arguments const &args): elementwise(args.elementwise), batch_stride_C(args.batch_stride_C), batch_stride_D(args.batch_stride_D), batch_stride_Max(args.batch_stride_Max), batch_stride_Sum(args.batch_stride_Sum) { } }; /// Shared storage struct SharedStorage { }; private: Params const & params_; SharedStorage & shared_storage_; MatrixCoord extent_; MatrixCoord extent_real_; ElementwiseFunctor elementwise_; OutputTileIterator iterator_C_; OutputTileIterator iterator_D_; typename OutputTileIterator::Fragment fragment_C_; typename OutputTileIterator::Fragment fragment_D_; ElementAccumulator alpha_; ElementAccumulator beta_; ElementNorm *ptr_Max_; ElementSum *ptr_Sum_; int column_offset_; ElementSoftmaxCompute accum_max_; ElementSoftmaxCompute 
accum_sum_; MatrixCoord thread_offset_; float infinity_; public: CUTLASS_DEVICE EpilogueVisitorSoftmax( Params const &params, SharedStorage &shared_storage, cutlass::MatrixCoord const &problem_size, int thread_idx, int warp_idx, int lane_idx, typename OutputTileIterator::Params params_C, typename OutputTileIterator::Params params_D, typename OutputTileIterator::Element *ptr_C, typename OutputTileIterator::Element *ptr_D, ElementNorm *ptr_Max = nullptr, ElementSum *ptr_Sum = nullptr, cutlass::MatrixCoord const &threadblock_offset = cutlass::MatrixCoord(0, 0), int column_offset = 0, cutlass::MatrixCoord const &problem_size_real = cutlass::MatrixCoord(0, 0), float infinity = 10000.0f ): params_(params), shared_storage_(shared_storage), extent_(problem_size), elementwise_(params.elementwise), iterator_C_(params_C, ptr_C, problem_size, thread_idx, threadblock_offset), iterator_D_(params_D, ptr_D, problem_size, thread_idx, threadblock_offset), ptr_Max_(ptr_Max), ptr_Sum_(ptr_Sum), column_offset_(column_offset), extent_real_(problem_size_real), infinity_(infinity) { alpha_ = (params.elementwise.alpha_ptr ? *params.elementwise.alpha_ptr : params.elementwise.alpha); beta_ = (params.elementwise.beta_ptr ? *params.elementwise.beta_ptr : params.elementwise.beta); if (beta_ == ElementAccumulator()) { iterator_C_.clear_mask(); } } /// Helper to indicate split-K behavior CUTLASS_DEVICE void set_k_partition( int split_k_index, ///< Index of this threadblock within split-K partitioned scheme int split_k_slices) { ///< Total number of split-K slices } /// Called to set the batch index CUTLASS_DEVICE void set_batch_index(int batch_idx) { iterator_C_.add_pointer_offset(batch_idx * params_.batch_stride_C); iterator_D_.add_pointer_offset(batch_idx * params_.batch_stride_D); } /// Called at the start of the epilogue just before iterating over accumulator slices CUTLASS_DEVICE void begin_epilogue() { } /// Called at the start of one step before starting accumulator exchange CUTLASS_DEVICE void begin_step(int step_idx) { fragment_D_.clear(); fragment_C_.clear(); if (elementwise_.kScale != cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) { iterator_C_.load(fragment_C_); ++iterator_C_; } } /// Called at the start of a row CUTLASS_DEVICE void begin_row(int row_idx) { // Clear accumulators for max and sum when starting a whole row clear_accum_(); } /// Called after accumulators have been exchanged for each accumulator vector CUTLASS_DEVICE void visit( int iter_idx, int row_idx, int column_idx, int frag_idx, AccumulatorFragment const &accum) { using Mul = cutlass::multiplies<SoftmaxFragment>; using Minus = cutlass::minus<SoftmaxFragment>; using Exp = cutlass::fast_exp_op<SoftmaxFragment>; Minus minus; Exp exponential; SoftmaxFragment result; NumericArrayConverter<ElementSoftmaxCompute, ElementOutput, kElementsPerAccess> source_converter; OutputVector &source_vector = reinterpret_cast<OutputVector *>(&fragment_C_)[frag_idx]; if (elementwise_.kScale == cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) { result = source_converter(elementwise_(accum)); }else{ result = source_converter(elementwise_(accum, source_vector)); } thread_offset_ = iterator_D_.thread_start() + OutputTileIterator::ThreadMap::iteration_offset(frag_idx); bool column_guard = (thread_offset_.column() < extent_.column()); if (kUseMasking) { int elements_in_boundary = extent_real_.column() - thread_offset_.column(); elements_in_boundary = (elements_in_boundary > kElementsPerAccess) ? 
kElementsPerAccess : elements_in_boundary; elementwise_padding_(result, elements_in_boundary); } ElementSoftmaxCompute accum_max_prev = accum_max_; // Compute the maximum within one row if (!column_idx) { // This is the first fragment in a new row if (column_guard) { accum_max_ = maximum_accumulator_(result); } } else { // This is an additional fragment in the same row if (column_guard) { accum_max_ = maximum_accumulator_(result, accum_max_); } } // proactively compute max in warps accum_max_ = warp_reduce_max_(accum_max_); ElementSoftmaxCompute updater = fast_exp(accum_max_prev - accum_max_); SoftmaxFragment intermediate = exponential(minus(result, accum_max_)); if (kHasMultiStepsInRow) { if (!column_idx) { accum_sum_ = (column_guard) ? \ sum_accumulator_(intermediate) : ElementSoftmaxCompute(0); } else { // Algorithm in $3.1, https://arxiv.org/pdf/2205.14135v1.pdf // S* = S* x updater + sum_row(P'), where updater = exp(M* - M_row) accum_sum_ = (column_guard) ? \ sum_accumulator_(intermediate, accum_sum_ * updater) : accum_sum_ * updater; } } else { accum_sum_ = (column_guard) ? sum_accumulator_(intermediate, accum_sum_) : ElementSoftmaxCompute(0); } // Convert to the output NumericArrayConverter<ElementOutput, ElementSoftmaxCompute, kElementsPerAccess> output_converter; OutputVector &output = reinterpret_cast<OutputVector *>(&fragment_D_)[frag_idx]; output = output_converter(result); } /// Called at the end of a row CUTLASS_DEVICE void end_row(int row_idx) { using ConvertSumOutput = cutlass::NumericConverter<ElementSum, ElementSoftmaxCompute>; using ConvertNormOutput = cutlass::NumericConverter<ElementNorm, ElementSoftmaxCompute>; ConvertSumOutput convert_sum_output; ConvertNormOutput convert_norm_output; // Compute accumulate sum only in the last step accum_sum_ = warp_reduce_sum_(accum_sum_); bool is_first_thread_in_tile = ((threadIdx.x % kThreadsPerRow) == 0); bool row_guard = thread_offset_.row() < extent_.row(); bool is_write_thread = row_guard && is_first_thread_in_tile; int block_batch = blockIdx.z; ElementNorm *curr_ptr_max = ptr_Max_ + thread_offset_.row() + column_offset_ + block_batch * params_.batch_stride_Max; ElementSum *curr_ptr_sum = ptr_Sum_ + thread_offset_.row() + column_offset_ + block_batch * params_.batch_stride_Sum; arch::global_store<ElementNorm, sizeof(ElementNorm)>( convert_norm_output(accum_max_), (void *)curr_ptr_max, is_write_thread); arch::global_store<ElementSum, sizeof(ElementSum)>( convert_sum_output(accum_sum_), (void *)curr_ptr_sum, is_write_thread); // Clear accumulators for max and sum when finishing a whole row clear_accum_(); } /// Called after all accumulator elements have been visited CUTLASS_DEVICE void end_step(int step_idx) { iterator_D_.store(fragment_D_); ++iterator_D_; } /// Called after all steps have been completed CUTLASS_DEVICE void end_epilogue() { } private: CUTLASS_DEVICE void elementwise_padding_(SoftmaxFragment &result, int elements_in_boundary) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < SoftmaxFragment::kElements; ++i) { result[i] = (i < elements_in_boundary) ? 
result[i] : ElementSoftmaxCompute(-infinity_); } } CUTLASS_DEVICE ElementSoftmaxCompute warp_reduce_sum_(ElementSoftmaxCompute sum_) { int half_thread_in_row = (kThreadsPerRow >> 1); CUTLASS_PRAGMA_UNROLL for (int i = half_thread_in_row; i > 0; i >>= 1) { ElementSoftmaxCompute tmp = __shfl_xor_sync(0xFFFFFFFF, sum_, i); sum_ += tmp; } return sum_; } CUTLASS_DEVICE ElementSoftmaxCompute warp_reduce_max_(ElementSoftmaxCompute max_) { int half_thread_in_row = (kThreadsPerRow >> 1); CUTLASS_PRAGMA_UNROLL for (int i = half_thread_in_row; i > 0; i >>= 1) { ElementSoftmaxCompute tmp = __shfl_xor_sync(0xFFFFFFFF, max_, i); max_ = fast_max(max_, tmp); } return max_; } CUTLASS_DEVICE void clear_accum_() { uint32_t float_max_bits = 0xff7fffff; // -FLT_MAX float min_float = reinterpret_cast<float const &>(float_max_bits); accum_max_ = ElementSoftmaxCompute(min_float); accum_sum_ = ElementSoftmaxCompute(0); } CUTLASS_DEVICE ElementSoftmaxCompute sum_accumulator_(SoftmaxFragment const &accum) { ElementSoftmaxCompute sum_ = ElementSoftmaxCompute(0); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < SoftmaxFragment::kElements; ++i) { sum_ += ElementSoftmaxCompute(accum[i]); } return sum_; } CUTLASS_DEVICE ElementSoftmaxCompute sum_accumulator_(SoftmaxFragment const &accum, ElementSoftmaxCompute sum_) { // ElementSoftmaxCompute sum_ = ElementSoftmaxCompute(0); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < SoftmaxFragment::kElements; ++i) { sum_ += ElementSoftmaxCompute(accum[i]); } return sum_; } CUTLASS_DEVICE ElementSoftmaxCompute maximum_accumulator_(SoftmaxFragment const &accum) { ElementSoftmaxCompute max_ = accum[0]; CUTLASS_PRAGMA_UNROLL for (int i = 1; i < SoftmaxFragment::kElements; ++i) { max_ = fast_max(max_, ElementSoftmaxCompute(accum[i])); } return max_; } CUTLASS_DEVICE ElementSoftmaxCompute maximum_accumulator_(SoftmaxFragment const &accum, ElementSoftmaxCompute max_) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < SoftmaxFragment::kElements; ++i) { max_ = fast_max(max_, ElementSoftmaxCompute(accum[i])); } return max_; } }; } // namespace threadblock } // namespace epilogue } // namespace cutlass
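The visit()/end_row() pair above is an online (single-pass) softmax: visit() keeps a running row maximum and a partial sum that is rescaled by exp(M*_old - M*_new) whenever the maximum grows, following the recurrence cited in the comment (§3.1 of https://arxiv.org/pdf/2205.14135v1.pdf), and end_row() writes the final max and sum for the row. Below is a minimal host-side C++ sketch of that recurrence; online_softmax_row and fragment_width are illustrative names, not CUTLASS symbols.

// Minimal host-side sketch of the running-max / rescaled-sum recurrence used by
// EpilogueVisitorSoftmax::visit(). Names are illustrative, not part of CUTLASS.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>
#include <vector>

// Processes one row fragment-by-fragment, as the epilogue visitor does.
void online_softmax_row(const std::vector<float>& row, int fragment_width,
                        float& row_max, float& row_sum) {
  row_max = -std::numeric_limits<float>::max();   // matches clear_accum_()
  row_sum = 0.0f;

  for (std::size_t start = 0; start < row.size(); start += fragment_width) {
    // New maximum over this fragment and all previous ones.
    float prev_max = row_max;
    std::size_t end = std::min(row.size(), start + fragment_width);
    for (std::size_t i = start; i < end; ++i) {
      row_max = std::max(row_max, row[i]);
    }

    // Rescale the partial sum accumulated under the old maximum:
    // S* = S* * exp(M*_old - M*_new) + sum_i exp(x_i - M*_new)
    float updater = std::exp(prev_max - row_max);
    float partial = 0.0f;
    for (std::size_t i = start; i < end; ++i) {
      partial += std::exp(row[i] - row_max);
    }
    row_sum = row_sum * updater + partial;
  }
}

int main() {
  std::vector<float> row = {1.0f, 3.0f, -2.0f, 0.5f, 4.0f, 2.0f};
  float m = 0.0f, s = 0.0f;
  online_softmax_row(row, /*fragment_width=*/2, m, s);
  // softmax(x_i) = exp(x_i - m) / s; m and s are what end_row() stores.
  std::printf("row max = %f, row sum = %f\n", m, s);
  return 0;
}

Running the sketch with the fragment width set to the full row length yields the same max and sum as a standard two-pass softmax, which is the invariant the single-pass epilogue relies on.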
16,804
C
31.694552
130
0.644965
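warp_reduce_max_() and warp_reduce_sum_() in the visitor above reduce across the kThreadsPerRow lanes that cooperate on one output row using XOR (butterfly) shuffles, so every participating lane ends up holding the full result without going through shared memory. The following is a standalone CUDA sketch of the same butterfly pattern with an example group size of 8 lanes; in the real kernel the group size comes from the output tile thread map, and error checking is omitted here.

// Standalone CUDA sketch of the butterfly (XOR-shuffle) reduction used by
// warp_reduce_max_ above: each lane in a power-of-two group ends up holding
// the maximum of the group. kThreadsPerRow = 8 is an example value.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void row_group_max(const float* in, float* out, int threads_per_row) {
  float v = in[threadIdx.x];
  // Butterfly reduction over an aligned power-of-two group of lanes.
  for (int i = threads_per_row / 2; i > 0; i >>= 1) {
    float other = __shfl_xor_sync(0xFFFFFFFF, v, i);
    v = fmaxf(v, other);
  }
  out[threadIdx.x] = v;   // every lane in the group holds the group maximum
}

int main() {
  const int kThreads = 32, kThreadsPerRow = 8;
  float h_in[kThreads], h_out[kThreads];
  for (int i = 0; i < kThreads; ++i) h_in[i] = float((i * 7) % 13);

  float *d_in, *d_out;
  cudaMalloc(&d_in, sizeof(h_in));
  cudaMalloc(&d_out, sizeof(h_out));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
  row_group_max<<<1, kThreads>>>(d_in, d_out, kThreadsPerRow);
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

  for (int i = 0; i < kThreads; ++i) {
    std::printf("lane %2d: in = %4.1f, group max = %4.1f\n", i, h_in[i], h_out[i]);
  }
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}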
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "predicated_tile_iterator.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape, typename WarpShape, int PartitionsK, typename ElementOutput, int ElementsPerAccess, typename ElementAccumulator > struct DefaultThreadMapVoltaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename ElementOutput_, int ElementsPerAccess > struct DefaultThreadMapVoltaTensorOp< ThreadblockShape_, WarpShape_, PartitionsK, ElementOutput_, ElementsPerAccess, half_t> { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using ElementOutput = ElementOutput_; static int const kElementsPerAccess = ElementsPerAccess; using ElementAccumulator = half_t; // // Definitions // struct Detail { static int const kTensorOpRows = 16; static int const kWarpSize = 32; static int const kInterleavedTilesM = WarpShape::kM / 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; using Shape = cutlass::epilogue::threadblock::OutputTileShape< ThreadblockShape::kN, // column 4, // row 4, // group WarpCount::kM, // cluster 1 // tile >; /// Number of iterations per subspace using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row kInterleavedTilesM, // group 1, // cluster WarpShape::kM / kTensorOpRows // iterations >; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap < typename Detail::Shape, typename Detail::Count, Detail::kThreads, kElementsPerAccess, sizeof_bits<ElementOutput>::value >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename ElementOutput_, int ElementsPerAccess > struct DefaultThreadMapVoltaTensorOp< ThreadblockShape_, WarpShape_, PartitionsK, ElementOutput_, ElementsPerAccess, float> { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using ElementOutput = ElementOutput_; static int const kElementsPerAccess = ElementsPerAccess; using ElementAccumulator = float; // // Definitions // struct Detail { static int const kTensorOpRows = 16; static int const kWarpSize = 32; static int const kInterleavedTilesM = WarpShape::kM / 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, 
kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; using Shape = cutlass::epilogue::threadblock::OutputTileShape< ThreadblockShape::kN, // column 4, // row 4, // group WarpCount::kM, // cluster 1 // tile >; /// Number of iterations per subspace using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row kInterleavedTilesM, // group 1, // cluster WarpShape::kM / kTensorOpRows // iterations >; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap < typename Detail::Shape, typename Detail::Count, Detail::kThreads, kElementsPerAccess, sizeof_bits<ElementOutput>::value >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
7,303
C
30.895196
101
0.596878
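The Detail struct in DefaultThreadMapVoltaTensorOp above is pure compile-time arithmetic over the threadblock and warp shapes. A small standalone sketch that reproduces it for one concrete configuration follows; the shapes chosen (128x256 threadblock, 64x64 warp) are example values for illustration only, and GemmShapeLike is a stand-in for cutlass::gemm::GemmShape.

// Standalone reproduction of the compile-time arithmetic in
// DefaultThreadMapVoltaTensorOp::Detail for one example configuration.
#include <cstdio>

template <int M, int N, int K>
struct GemmShapeLike {
  static constexpr int kM = M, kN = N, kK = K, kCount = M * N * K;
};

int main() {
  using ThreadblockShape = GemmShapeLike<128, 256, 32>;
  using WarpShape        = GemmShapeLike<64, 64, 32>;
  constexpr int kPartitionsK       = 1;
  constexpr int kTensorOpRows      = 16;
  constexpr int kWarpSize          = 32;
  constexpr int kInterleavedTilesM = WarpShape::kM / 32;

  using WarpCount = GemmShapeLike<ThreadblockShape::kM / WarpShape::kM,
                                  ThreadblockShape::kN / WarpShape::kN,
                                  kPartitionsK>;
  constexpr int kThreads    = WarpCount::kCount * kWarpSize;
  constexpr int kIterations = WarpShape::kM / kTensorOpRows;

  std::printf("warps = %d x %d x %d, threads = %d\n",
              WarpCount::kM, WarpCount::kN, WarpCount::kK, kThreads);
  std::printf("interleaved M tiles = %d, row iterations per warp = %d\n",
              kInterleavedTilesM, kIterations);
  return 0;
}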
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type BlasMode BlasMode_ = BlasMode::kGemm ///< Tile Iterator for a Symmetric or Hermitian Kernel > class PredicatedTileIteratorBlas3 { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static BlasMode const kBlasMode = BlasMode_; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Count::kTile; static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0"); static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0"); static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0"); static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0"); /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; static_assert( AccessType::kElements == 1, "BLAS3 Epilogue must use AccessType::kElements as 1"); // // Parameters struct // /// Uses a non-template class struct Params : PredicatedTileIteratorParams { CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout): PredicatedTileIteratorParams( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, make_OutputTileThreadMapDesc<ThreadMap>() ) { } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kColumn; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
PredicatedTileIteratorParams params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Fill Mode for a tile on diagonal of a symmetric kernel cutlass::FillMode fill_mode; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// Internal state counter int state_[3]; /// Starting address of the matrix size_t matrix_start_addr; static_assert((kBlasMode == BlasMode::kSymmetric || kBlasMode == BlasMode::kHermitian), "Unsupported blas3 mode."); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIteratorBlas3( PredicatedTileIteratorParams const & params, Element *pointer, TensorCoord extent, int thread_idx, TensorCoord threadblock_offset , cutlass::FillMode fill_mode ): params_(params), fill_mode(fill_mode) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; extent_row_ = extent.row(); thread_start_row_ = thread_offset.row(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { mask_.predicates[c] = ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column()); } // Check Symmetric kernel modes (Lower and Upper - for diagonal CTAs, None for rest CTAs) if ((kBlasMode == BlasMode::kSymmetric || kBlasMode == BlasMode::kHermitian) && fill_mode == cutlass::FillMode::kInvalid) { arch::device_breakpoint(); } // Starting address of the matrix matrix_start_addr = reinterpret_cast<size_t>(pointer); // Initialize pointer byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + LongIndex(thread_offset.row()) * LongIndex(params_.stride) + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; // Initialize internal state counter state_[0] = state_[1] = state_[2] = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } 
} if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Loads a fragment on the diagonal of a symmetric kernel to memory CUTLASS_DEVICE void load_symmetric_with_byte_offset(Fragment &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); bool isLowerMode = (fill_mode == cutlass::FillMode::kLower) ? true : false; CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); // Offset of row from beginning of the matrix per thread size_t row_start_offset = (size_t)memory_pointer - matrix_start_addr; // Absolute row index int row_index = int(row_start_offset/params_.stride); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; // Offset of column from beginning of row per thread size_t col_start_offset = row_start_offset + (column * ThreadMap::Delta::kColumn / kElementsPerAccess) * sizeof(AccessType); // Absolute column index size_t col_index = (col_start_offset%params_.stride)/sizeof(AccessType); guard = guard && ( (isLowerMode && row_index >= col_index) || (!isLowerMode && row_index <= col_index) ); cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); // The imaginary parts of the diagonal elements of a complex element are assumed and set to zero if (guard && kBlasMode == BlasMode::kHermitian && cutlass::is_complex<Element>::value) { Element *scalar_ptr = reinterpret_cast<Element *>(frag_ptr); if (row_index == col_index) { scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] = real(scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]); } } } if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { if (fill_mode == cutlass::FillMode::kNone) { load_with_byte_offset(frag, 0); } else { load_symmetric_with_byte_offset(frag, 0); } } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + 
ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_store<AccessType, sizeof(AccessType)>( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Stores a fragment on the diagonal of a symmetric kernel to memory CUTLASS_DEVICE void store_symmetric_with_byte_offset(Fragment const &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); bool isLowerMode = (fill_mode == cutlass::FillMode::kLower) ? true : false; CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); // Offset of row from beginning of the matrix per thread size_t row_start_offset = (size_t)memory_pointer - matrix_start_addr; // Absolute row index int row_index = int(row_start_offset/params_.stride); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; // Offset of column from beginning of row per thread size_t col_start_offset = row_start_offset + (column * ThreadMap::Delta::kColumn / kElementsPerAccess) * sizeof(AccessType); // Absolute column index size_t col_index = (col_start_offset%params_.stride)/sizeof(AccessType); guard = guard && ( (isLowerMode && row_index >= col_index) || (!isLowerMode && row_index <= col_index) ); // The imaginary parts of the diagonal elements of a complex element are assumed and set to zero if (guard && kBlasMode == BlasMode::kHermitian && cutlass::is_complex<Element>::value) { AccessType *frag_ptr_modify = const_cast<AccessType *>(frag_ptr); Element *scalar_ptr = reinterpret_cast<Element *>(frag_ptr_modify); if (row_index == col_index) { scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] = real(scalar_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]); } } cutlass::arch::global_store<AccessType, sizeof(AccessType)>( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; } } if (group + 1 < 
ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { if (fill_mode == cutlass::FillMode::kNone) { store_with_byte_offset(frag, 0); } else { store_symmetric_with_byte_offset(frag, 0); } } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIteratorBlas3 &operator++() { ++state_[0]; byte_pointer_ += params_.advance_row; thread_start_row_ += ThreadMap::Shape::kRow; if (state_[0] == ThreadMap::Count::kRow) { state_[0] = 0; ++state_[1]; byte_pointer_ += params_.advance_group; thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; if (state_[1] == ThreadMap::Count::kGroup) { state_[1] = 0; ++state_[2]; byte_pointer_ += params_.advance_cluster; thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow; if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; byte_pointer_ += params_.advance_tile; } } } return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
21,249
C
32.51735
108
0.605158
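For CTAs that land on the diagonal of a symmetric or Hermitian problem, the load_symmetric/store_symmetric paths in PredicatedTileIteratorBlas3 above derive absolute row and column indices from the byte offset and keep an access only when it falls inside the stored triangle (and, for Hermitian tensors, drop the imaginary part on the diagonal). A host-side sketch of just the triangular guard follows; FillMode here is a stand-in enum rather than cutlass::FillMode.

// Host-side sketch of the triangular guard used by
// PredicatedTileIteratorBlas3::load_symmetric_with_byte_offset().
#include <cstdio>

enum class FillMode { kLower, kUpper };

// Returns true when (row, col) lies in the triangle selected by fill_mode,
// mirroring: (isLowerMode && row >= col) || (!isLowerMode && row <= col).
bool triangular_guard(int row, int col, FillMode fill_mode) {
  bool is_lower = (fill_mode == FillMode::kLower);
  return (is_lower && row >= col) || (!is_lower && row <= col);
}

int main() {
  // Print the access mask of a 4x4 diagonal tile in lower-fill mode.
  for (int r = 0; r < 4; ++r) {
    for (int c = 0; c < 4; ++c) {
      std::printf("%c ", triangular_guard(r, c, FillMode::kLower) ? 'x' : '.');
    }
    std::printf("\n");
  }
  return 0;
}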
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator_mixed.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops optimized for mixed-precision. This assumes the shared memory tile is in a permuted layout which avoids bank conflicts on loading. When the fragment is loaded into registers, it matches the row-major thread map assumed by the predicated tile iterator writing to global memory. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from shared memory in epilogue. 
/// /// Satisfies: ReadableTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Accumulator data type int ElementSizeBits_, ///< Size of accumulator in bits int OutputSizeBits_, ///< Size of output element in bits int ElementsPerAccess, ///< Vector length of output vector int ContiguousLanes ///< Number of lanes in the warp writing to contiguous elements /// in the global memory tensor > class SharedLoadIteratorMixed; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from shared memory in epilogue. /// /// Satisfies: ReadableTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_ ///< Accumulator data type > class SharedLoadIteratorMixed<ThreadMap_, Element_, 32, 16, 8, 8> { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kAlignment = ThreadMap::kElementsPerAccess * sizeof_bits<Element_>::value / 8; static int const kThreads = ThreadMap::kThreads; /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray< Element, ThreadMap::kElementsPerAccess, kAlignment>; /// Vector type used for SMEM loads using LoadType = AlignedArray< Element, const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess), const_min(16, kAlignment) >; static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements; private: // // Data members // /// Byte-level pointer LoadType const *pointers_[kLoadsPerAccess]; /// Stride along adjacent rows in units of LoadType int stride_; public: // // Methods // /// Constructor CUTLASS_DEVICE SharedLoadIteratorMixed( TensorRef ref, int thread_idx ): stride_((ref.stride(0) / LoadType::kElements)) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); // Initialize pointers CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { pointers_[i] = reinterpret_cast<LoadType const *>(ref.data()); int col_idx = (thread_offset.column() / kElementsPerAccess) * kLoadsPerAccess; int bank_offset = (col_idx * int(sizeof(LoadType)) / 128) % kLoadsPerAccess; col_idx += (bank_offset + i) % kLoadsPerAccess; pointers_[i] += thread_offset.row() * stride_ + col_idx; } } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { pointers_[i] += pointer_offset / LoadType::kElements; } } CUTLASS_DEVICE void add_tile_offset(TensorCoord const &offset) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { pointers_[i] += offset.row() * Shape::kRow * stride_ + offset.column() * Shape::kColumn / LoadType::kElements; } } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < 
ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int row_ptr_offset = row * ThreadMap::Delta::kRow * stride_ + group * ThreadMap::Delta::kGroup* stride_ + cluster * ThreadMap::Delta::kCluster * stride_ + pointer_offset / LoadType::kElements; int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kLoadsPerAccess; ++v) { int vector_idx = (column * ThreadMap::Delta::kColumn / kElementsPerAccess * kLoadsPerAccess); LoadType const *memory_pointer = pointers_[v] + row_ptr_offset; frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[vector_idx]; } } } } } } /// Set base smem address CUTLASS_DEVICE void set_smem_base_address(Index address) {} /// Loads a fragment CUTLASS_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for int32_t x 16 => int8_t/int4b_t x 16 template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) int OutputSizeBits_ ///< Size of output element in bits > class SharedLoadIteratorMixed<ThreadMap_, int32_t, 32, OutputSizeBits_, 16, 8> { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = int32_t; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kAlignment = 16; static int const kThreads = ThreadMap::kThreads; /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray< Element, 16, kAlignment>; /// Vector type used for SMEM loads using LoadType = AlignedArray< Element, 4, 16 >; static int const kLoadsPerAccess = 4; private: // // Data members // /// Byte-level pointer LoadType const *pointers_[kLoadsPerAccess]; /// Stride along adjacent rows in units of LoadType int stride_; public: // // Methods // /// Constructor CUTLASS_DEVICE SharedLoadIteratorMixed( TensorRef ref, int thread_idx ): stride_((ref.stride(0) / LoadType::kElements)) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); // Initialize pointers LoadType const *base_ptr = reinterpret_cast<LoadType const *>(ref.data()) + thread_offset.row() * stride_; int lane_col_idx = thread_offset.column() / 16; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { int lane_offset = (lane_col_idx % 2) * 4 | ((lane_col_idx / 2) * 8) | ((lane_col_idx / 2) ^ i); pointers_[i] = base_ptr + lane_offset; } } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { pointers_[i] += pointer_offset 
/ LoadType::kElements; } } CUTLASS_DEVICE void add_tile_offset(TensorCoord const &offset) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { pointers_[i] += offset.row() * Shape::kRow * stride_ + offset.column() * Shape::kColumn / LoadType::kElements; } } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int row_ptr_offset = row * ThreadMap::Delta::kRow * stride_ + group * ThreadMap::Delta::kGroup* stride_ + cluster * ThreadMap::Delta::kCluster * stride_ + pointer_offset / LoadType::kElements; int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kLoadsPerAccess; ++v) { LoadType const *memory_pointer = pointers_[v]; frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[row_ptr_offset]; } } } } } } /// Set base smem address CUTLASS_DEVICE void set_smem_base_address(Index address) {} /// Loads a fragment CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for int32_t x 8 => int8_t/int4b_t x 8 template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) int OutputSizeBits_ > class SharedLoadIteratorMixed<ThreadMap_, int32_t, 32, OutputSizeBits_, 8, 8> { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = int32_t; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kAlignment = 8; static int const kThreads = ThreadMap::kThreads; /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray< Element, 8, kAlignment>; /// Vector type used for SMEM loads using LoadType = AlignedArray< Element, 4, 16 >; static int const kLoadsPerAccess = 2; private: // // Data members // /// Byte-level pointer LoadType const *pointers_[kLoadsPerAccess]; /// Stride along adjacent rows in units of LoadType int stride_; public: // // Methods // /// Constructor CUTLASS_DEVICE SharedLoadIteratorMixed( TensorRef ref, int thread_idx ): stride_((ref.stride(0) / LoadType::kElements)) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); // Initialize pointers LoadType const *base_ptr = reinterpret_cast<LoadType const *>(ref.data()) + thread_offset.row() * stride_; int lane_col_idx = thread_offset.column() / 8; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { int lane_offset = (lane_col_idx % 8) * 2 | ((lane_col_idx / 4) ^ 
i); pointers_[i] = base_ptr + lane_offset; } } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { pointers_[i] += pointer_offset / LoadType::kElements; } } CUTLASS_DEVICE void add_tile_offset(TensorCoord const &offset) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kLoadsPerAccess; ++i) { pointers_[i] += offset.row() * Shape::kRow * stride_ + offset.column() * Shape::kColumn / LoadType::kElements; } } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int row_ptr_offset = row * ThreadMap::Delta::kRow * stride_ + group * ThreadMap::Delta::kGroup* stride_ + cluster * ThreadMap::Delta::kCluster * stride_ + pointer_offset / LoadType::kElements; int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kLoadsPerAccess; ++v) { LoadType const *memory_pointer = pointers_[v]; frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[row_ptr_offset]; } } } } } } /// Set base smem address CUTLASS_DEVICE void set_smem_base_address(Index address) {} /// Loads a fragment CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
17,683
C
29.177474
117
0.623084
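The first SharedLoadIteratorMixed specialization above splits each access into kLoadsPerAccess 16-byte LoadType pointers and rotates which sub-vector each pointer starts on, so the permuted shared-memory layout written by the epilogue can be read back without bank conflicts. Below is a host-side reproduction of that index math with example parameters (kElementsPerAccess = 8, kLoadsPerAccess = 2, 16-byte LoadType, 32-bit accumulator), printing which LoadType column each pointer would reference.

// Host-side reproduction of the pointer-permutation math in the constructor of
// the first SharedLoadIteratorMixed specialization. Parameter values are
// examples used only to print the pattern.
#include <cstdio>

int main() {
  constexpr int kElementsPerAccess = 8;   // accumulator elements per access
  constexpr int kLoadsPerAccess    = 2;   // 16B LoadType chunks per 32B access
  constexpr int kLoadTypeBytes     = 16;

  for (int column = 0; column < 64; column += kElementsPerAccess) {
    std::printf("column %2d ->", column);
    for (int i = 0; i < kLoadsPerAccess; ++i) {
      int col_idx     = (column / kElementsPerAccess) * kLoadsPerAccess;
      int bank_offset = (col_idx * kLoadTypeBytes / 128) % kLoadsPerAccess;
      // Rotate which 16B sub-vector each pointer starts on so that threads in
      // the same access phase hit different shared-memory banks.
      col_idx += (bank_offset + i) % kLoadsPerAccess;
      std::printf("  ptr[%d] = %2d", i, col_idx);
    }
    std::printf("\n");
  }
  return 0;
}

The printed pattern shows the two pointers swapping roles every 128 bytes of columns, which is the rotation the constructor's bank_offset term encodes.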
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. /// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// /// It provides a fast path for the case Rank = 2 which does not need div/rem to /// calculate modes. 
template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type int Rank > class PredicatedTileIteratorAffineRankN { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = Element_; using Layout = layout::AffineRankN<Rank>; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = typename Layout::TensorCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Count::kTile; static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0"); static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0"); static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0"); static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0"); static_assert( !(Layout::kRank % 2), "Layout rank must be even. This assumes the first half of the modes correspond to the 'row' " "and the second half of the modes correspond to the 'column'"); static bool const kBigEndian = false; /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; // // Parameters struct // /// Parameters structure struct Params { // // Data members // Layout layout; /// Stride in units of bytes along M modes Coord<Layout::kRank/2, typename Layout::LongIndex> stride_m; /// Stride in units of bytes along N modes Coord<Layout::kRank/2, typename Layout::LongIndex> stride_n; /// Fast divmod objects divided by tensor extents FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)]; /// Fast divmod objects divided by tensor extents FastDivmod divmod_n[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)]; int64_t rank2_inc_col; int64_t rank2_inc_row; // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(TensorCoord const &extent, Layout const &layout_): layout(layout_) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank / 2; ++i) { stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]); stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]); } if (kBigEndian) { // "Big Endian" scheme CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank / 2 - 1; ++i) { divmod_m[i] = FastDivmod(extent[i + 1]); divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]); } } else { // "Little Endian" scheme CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank / 2 - 1; ++i) { divmod_m[i] = FastDivmod(extent[i]); divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]); } } #if 0 // // Debug print statements to verify extents and strides are passed correctly. 
// printf("PredicatedTileIteratorAffine::Params() entered\n"); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank; ++i) { printf(" extent[%d]: %d\n", i, extent[i]); } for (int i = 0; i < Layout::kRank; ++i) { printf(" stride[%d]: %ld\n", i, layout_.stride()[i]); } printf("PredicatedTileIteratorAffine::Params() returning\n"); #endif } CUTLASS_HOST_DEVICE Params(Layout const &layout_): layout(layout_) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank / 2; ++i) { stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]); stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]); } rank2_inc_col = ThreadMap::Delta::kColumn * stride_n[0]; rank2_inc_row = ThreadMap::Delta::kRow * stride_m[0]; } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kColumn; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. Params params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// Extent of the matrix tile in columns Index extent_col_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// A thread's starting column position (assuming steady-state predicates have been computed) Index thread_start_column_; /// Internal state counter int state_[3]; /// Offsets in columns, cached for performance int64_t offset_modes_n_[ThreadMap::Iterations::kColumn]; // // Static asserts about internal strides // static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIteratorAffineRankN( Params const & params, Element *pointer, MatrixCoord extent, int thread_idx, MatrixCoord threadblock_offset = MatrixCoord(), int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ): params_(params) { MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; extent_row_ = extent.row(); extent_col_ = extent.column(); thread_start_row_ = thread_offset.row(); thread_start_column_ = thread_offset.column(); if (Layout::kRank > 2) { // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { // // Compute coordinate and decompose into N modes // int coord_n = thread_start_column_ + c * ThreadMap::Delta::kColumn; mask_.predicates[c] = coord_n < extent.column(); Coord<Layout::kRank / 2, Index> modes_n; int64_t offset_modes_n = 0; if (kBigEndian) { modes_n = CoordinateDecomposition<Layout::kRank / 2>(coord_n, params_.divmod_n); offset_modes_n = dot(modes_n, params_.stride_n); } else { modes_n = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_n, params_.divmod_n); offset_modes_n = dot(modes_n, params_.stride_n); } offset_modes_n_[c] = offset_modes_n; } if (!pointer) { mask_.clear(); } } 
// Initialize pointer byte_pointer_ = reinterpret_cast<uint8_t *>(pointer); // Initialize internal state counter state_[0] = state_[1] = state_[2] = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) { uint8_t const *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; int64_t offset_modes_m = row_begin * params_.stride_m[0]; CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); // // Compute coordinate and decompose into M modes // int coord_m = row * ThreadMap::Delta::kRow + row_begin; Coord<Layout::kRank / 2, Index> modes_m; if (Layout::kRank > 2) { if (kBigEndian) { modes_m = CoordinateDecomposition<Layout::kRank / 2>(coord_m, params_.divmod_m); } else { modes_m = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_m, params_.divmod_m); } offset_modes_m = dot(modes_m, params_.stride_m); } // // Compute the offset due to modes M // bool row_guard = (coord_m < extent_row_); int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0]; CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { // // Compute coordinate and decompose into N modes // if (Layout::kRank > 2) { offset_modes_n = offset_modes_n_[column]; } // // Compute the pointer and access // bool guard; if (Layout::kRank > 2) { guard = row_guard && mask_.predicates[column]; } else { guard = (coord_m < extent_row_) && ((thread_start_column_ + ThreadMap::Delta::kColumn * column) < extent_col_); } cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset), guard ); if (Layout::kRank == 2) { offset_modes_n += params_.rank2_inc_col; } } if (Layout::kRank == 2) { offset_modes_m += params_.rank2_inc_row; } } } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) { uint8_t *byte_pointer = byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; int64_t offset_modes_m = row_begin * params_.stride_m[0]; CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); // // Compute coordinate and decompose into M modes // int coord_m = row * ThreadMap::Delta::kRow + row_begin; Coord<Layout::kRank / 2, 
Index> modes_m; if (Layout::kRank > 2) { if (kBigEndian) { modes_m = CoordinateDecomposition<Layout::kRank / 2>(coord_m, params_.divmod_m); } else { modes_m = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(coord_m, params_.divmod_m); } offset_modes_m = dot(modes_m, params_.stride_m); } // // Compute the offset due to modes M // bool row_guard = (coord_m < extent_row_); int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0]; CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { // // Compute coordinate and decompose into N modes // if (Layout::kRank > 2) { offset_modes_n = offset_modes_n_[column]; } // // Compute the pointer and access // bool guard; if (Layout::kRank > 2) { guard = row_guard && mask_.predicates[column]; } else { guard = (coord_m < extent_row_) && ((thread_start_column_ + ThreadMap::Delta::kColumn * column) < extent_col_); } cutlass::arch::global_store<AccessType, sizeof(AccessType)>( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset), guard); if (Layout::kRank == 2) { offset_modes_n += params_.rank2_inc_col; } } if (Layout::kRank == 2) { offset_modes_m += params_.rank2_inc_row; } } } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIteratorAffineRankN &operator++() { ++state_[0]; thread_start_row_ += ThreadMap::Shape::kRow; if (state_[0] == ThreadMap::Count::kRow) { state_[0] = 0; ++state_[1]; thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; if (state_[1] == ThreadMap::Count::kGroup) { state_[1] = 0; ++state_[2]; thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow; if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; } } } return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
18,821
C
29.555195
125
0.591042
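The affine rank-N iterator above (PredicatedTileIteratorAffineRankN) turns each linear row or column coordinate into a set of modes via the precomputed divmods and then dots those modes with per-mode byte strides to obtain the access offset. The standalone sketch below illustrates that little-endian decomposition and dot product outside of CUTLASS; the rank, extents, strides, and helper names are hypothetical values chosen only for illustration.

#include <array>
#include <cstdint>
#include <cstdio>

// Decompose a linear coordinate into R modes, least-significant mode first
// (little-endian), mirroring the spirit of CoordinateDecompositionLittleEndian.
template <int R>
std::array<int, R> decompose_little_endian(int coord, std::array<int, R> const &extent) {
  std::array<int, R> modes{};
  for (int i = 0; i < R; ++i) {
    modes[i] = coord % extent[i];
    coord /= extent[i];
  }
  return modes;
}

// The byte offset of an access is the dot product of the mode coordinates
// with the per-mode byte strides.
template <int R>
int64_t dot_offset(std::array<int, R> const &modes, std::array<int64_t, R> const &stride_bytes) {
  int64_t offset = 0;
  for (int i = 0; i < R; ++i) {
    offset += int64_t(modes[i]) * stride_bytes[i];
  }
  return offset;
}

int main() {
  // Hypothetical rank-4 output tensor: the N dimension is split into 2 modes
  // of extents {8, 16}; strides are in bytes (made-up values).
  std::array<int, 2> extent_n{8, 16};
  std::array<int64_t, 2> stride_n{4, 32};

  int coord_n = 53;  // linear column coordinate of one access
  auto modes_n = decompose_little_endian<2>(coord_n, extent_n);
  std::printf("modes_n = {%d, %d}, byte offset = %lld\n",
              modes_n[0], modes_n[1],
              (long long)dot_offset<2>(modes_n, stride_n));
  return 0;
}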
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_with_reduction.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/threadblock/epilogue_with_reduction.h" #include "cutlass/layout/permute.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. 
template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename OutputOp, typename ReductionOp, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueWithReductionTensorOp { /// Use defaults related to the existing epilogue using Base = DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess >; /// Additional tensor tile iterator using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, typename OutputOp::ElementTensor >; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; /// Define the epilogue using Epilogue = EpilogueWithReduction< Shape, WarpMmaTensorOp, PartitionsK, OutputTileIterator, TensorTileIterator, typename WarpMmaTensorOp::ElementC, typename Base::AccumulatorFragmentIterator, typename Base::WarpTileIterator, typename Base::SharedLoadIterator, typename Base::OutputOp, ReductionOp, typename Base::Padding >; }; //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for TensorOps. template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename OutputOp, typename ReductionOp, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueWithReductionVoltaTensorOp { /// Use defaults related to the existing epilogue using Base = DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess >; /// Additional tensor tile iterator using TensorTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, typename OutputOp::ElementTensor >; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; /// Define the epilogue using Epilogue = EpilogueWithReduction< Shape, WarpMmaTensorOp, PartitionsK, OutputTileIterator, TensorTileIterator, typename WarpMmaTensorOp::ElementC, typename Base::AccumulatorFragmentIterator, typename Base::WarpTileIterator, typename Base::SharedLoadIterator, typename Base::OutputOp, ReductionOp, typename Base::Padding >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
5,763
C
31.382022
100
0.681763
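Both defaults in default_epilogue_with_reduction.h follow the same compile-time composition pattern: reuse an existing Base default for the thread map and warp/shared iterators, define the additional tensor tile iterator on top of it, and assemble EpilogueWithReduction from the pieces. The sketch below mirrors only that pattern with hypothetical stand-in types (ThreadMapA, TileIterator, EpilogueImpl); it is not the real CUTLASS type graph.

#include <type_traits>

// Hypothetical stand-ins for the real CUTLASS components.
struct ThreadMapA {};
template <typename ThreadMap, typename Element> struct TileIterator {};
template <typename OutIt, typename TensorIt> struct EpilogueImpl {};

// Base "default" traits class that already knows how to pick a thread map.
struct DefaultBase {
  using OutputTileThreadMap = ThreadMapA;
};

// Derived default: reuse the base's choices, add an extra tensor iterator,
// and assemble the final epilogue type, mirroring the structure of
// DefaultEpilogueWithReductionTensorOp.
template <typename ElementOutput, typename ElementTensor>
struct DefaultWithReduction {
  using Base = DefaultBase;

  using TensorTileIterator =
      TileIterator<typename Base::OutputTileThreadMap, ElementTensor>;
  using OutputTileIterator =
      TileIterator<typename Base::OutputTileThreadMap, ElementOutput>;

  using Epilogue = EpilogueImpl<OutputTileIterator, TensorTileIterator>;
};

// The composed type is available purely at compile time.
using MyEpilogue = DefaultWithReduction<float, float>::Epilogue;
static_assert(std::is_same<MyEpilogue,
                           EpilogueImpl<TileIterator<ThreadMapA, float>,
                                        TileIterator<ThreadMapA, float>>>::value,
              "composition resolves to the expected epilogue type");

int main() { return 0; }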
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_simt.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "predicated_tile_iterator.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for SIMT accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, typename MmaSimtPolicy_, int PartitionsK, typename Element_, int ElementsPerAccess > struct DefaultThreadMapSimt { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using MmaSimtPolicy = MmaSimtPolicy_; static int const kPartitionsK = PartitionsK; using Element = Element_; static int const kElementsPerAccess = ElementsPerAccess; // // Definitions // struct Detail { static int const kWarpSize = 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK >; /// Computes number of thread-level matrix multiplies are needed to span a warp static int const kGroupCount = WarpShape::kM / (MmaSimtPolicy::WarpShape::kRow * MmaSimtPolicy::LaneMmaShape::kM); /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; /// Number of iterations static int const kIterations = MmaSimtPolicy::LaneMmaShape::kM * kGroupCount; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap< OutputTileShape< // Shape ThreadblockShape::kN, 1, MmaSimtPolicy::WarpShape::kRow, Detail::WarpCount::kM, 1>, OutputTileShape< // Count 1, MmaSimtPolicy::LaneMmaShape::kM, Detail::kGroupCount, 1, Detail::kIterations>, Detail::kThreads, kElementsPerAccess, sizeof_bits<Element>::value >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
4,409
C
33.453125
101
0.618961
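DefaultThreadMapSimt::Detail derives the warp count, group count, thread count, and iteration count purely from the threadblock, warp, and SIMT policy shapes. The constexpr sketch below reproduces that arithmetic for one hypothetical configuration (128x128 threadblock, 64x64 warps, a 4-row lane arrangement with LaneMmaShape::kM = 4); the concrete numbers are illustrative, not a recommended configuration.

#include <cstdio>

// Hypothetical tile sizes chosen for illustration only; real values come from
// the GEMM configuration.
constexpr int kThreadblockM   = 128, kThreadblockN = 128;
constexpr int kWarpM          = 64,  kWarpN        = 64;
constexpr int kPolicyWarpRows = 4;   // MmaSimtPolicy::WarpShape::kRow
constexpr int kLaneMmaM       = 4;   // MmaSimtPolicy::LaneMmaShape::kM
constexpr int kPartitionsK    = 1;
constexpr int kWarpSize       = 32;

// Same arithmetic as DefaultThreadMapSimt::Detail.
constexpr int kWarpCountM = kThreadblockM / kWarpM;                 // 2
constexpr int kWarpCountN = kThreadblockN / kWarpN;                 // 2
constexpr int kWarpCount  = kWarpCountM * kWarpCountN * kPartitionsK;
constexpr int kGroupCount = kWarpM / (kPolicyWarpRows * kLaneMmaM); // 4
constexpr int kThreads    = kWarpCount * kWarpSize;                 // 128
constexpr int kIterations = kLaneMmaM * kGroupCount;                // 16

static_assert(kThreadblockM % kWarpM == 0 && kThreadblockN % kWarpN == 0,
              "Divisibility");

int main() {
  std::printf("warps=%d threads=%d group_count=%d iterations=%d\n",
              kWarpCount, kThreads, kGroupCount, kIterations);
  return 0;
}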
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/numeric_types.h" namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename ElementAccumulator_, typename ElementOutput_, typename ThreadBlockShape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) bool ReduceKForA_ > class EpilogueGemmKReduction { public: using ThreadBlockShape = ThreadBlockShape_; using WarpMmaOperator = WarpMmaOperator_; using WarpShape = typename WarpMmaOperator::Shape; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// Accumulator element using ElementAccumulator = ElementAccumulator_; /// Output element using ElementOutput = ElementOutput_; /// Output access size static int const kElementsPerAccess = 1; static bool const kReduceKForA = ReduceKForA_; static int const kThreadBlockSize = kReduceKForA ? ThreadBlockShape::kM : ThreadBlockShape::kN; static int const kWarpSize = kReduceKForA ? WarpShape::kM : WarpShape::kN; static int const kIterations = kWarpSize / 8; using FragmentAccumulator = Array<ElementAccumulator, kIterations>; private: int thread_offset_; ElementOutput* pointer_; int col_; public: /// Constructor CUTLASS_DEVICE EpilogueGemmKReduction( int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx, ///< Id of thread within warp int threadblock_offset, ElementOutput* pointer ) { col_ = lane_idx % 4; thread_offset_ = threadblock_offset * kThreadBlockSize + warp_idx * kWarpSize + lane_idx / 4 + col_ * 8; pointer_ = pointer + LongIndex(thread_offset_); } /// Streams the result to global memory CUTLASS_DEVICE void operator()( int size, FragmentAccumulator &gemm_k_with_reduction_accumulation, bool LoadForSerialSplitK ) { bool guard[kIterations / 4]; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kIterations / 4; ++i) { guard[i] = ((thread_offset_ + i * 32) < size); } Array<ElementOutput, kIterations / 4> source; source.clear(); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kIterations / 4; ++i) { ElementOutput *source_ptr = reinterpret_cast<ElementOutput *>(&source); cutlass::arch::global_load<ElementOutput, sizeof(ElementOutput)>( source_ptr[i], (void *)(pointer_ + i * 32), guard[i] && LoadForSerialSplitK); } FragmentAccumulator sum = gemm_k_with_reduction_accumulation; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kIterations; ++i) { sum[i] += __shfl_xor_sync(0xffffffff, sum[i], 1); sum[i] += __shfl_xor_sync(0xffffffff, sum[i], 2); } Array<ElementAccumulator, kIterations / 4> intermediate; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kIterations / 4; ++i) { if (col_ == 0) { intermediate[i] = sum[0 + i * 4]; } if (col_ == 1) { intermediate[i] = sum[1 + i * 4]; } if (col_ == 2) { intermediate[i] = sum[2 + i * 4]; } 
if (col_ == 3) { intermediate[i] = sum[3 + i * 4]; } } NumericArrayConverter<ElementAccumulator, ElementOutput, kIterations / 4> source_converter; Array<ElementAccumulator, kIterations / 4> converted_source = source_converter(source); plus<Array<ElementAccumulator, kIterations / 4>> plus_source; intermediate = plus_source(intermediate, converted_source); NumericArrayConverter<ElementOutput, ElementAccumulator, kIterations / 4> converter; Array<ElementOutput, kIterations / 4> result = converter(intermediate); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kIterations / 4; ++i) { cutlass::arch::global_store<ElementOutput, sizeof(ElementOutput)>(result[i], (void *)(pointer_ + i * 32), guard[i]); } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
7,401
C
33.751174
107
0.624645
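EpilogueGemmKReduction::operator() reduces partial sums across the four lanes of each quad with two XOR shuffles (lane masks 1 and 2) before adding the existing source values and storing. The minimal CUDA kernel below demonstrates just that 4-lane butterfly on a single warp of made-up data; the kernel name and launch configuration are illustrative.

#include <cstdio>
#include <cuda_runtime.h>

// After XOR-shuffles with masks 1 and 2, every group of 4 consecutive lanes
// holds the sum of that group's values -- the same pattern used inside
// EpilogueGemmKReduction::operator().
__global__ void butterfly4(float const *in, float *out) {
  int lane = threadIdx.x;           // assume a single warp of 32 threads
  float v = in[lane];
  v += __shfl_xor_sync(0xffffffff, v, 1);
  v += __shfl_xor_sync(0xffffffff, v, 2);
  out[lane] = v;                    // lanes 4k..4k+3 all hold the group sum
}

int main() {
  float h_in[32], h_out[32];
  for (int i = 0; i < 32; ++i) h_in[i] = float(i);

  float *d_in, *d_out;
  cudaMalloc(&d_in, sizeof(h_in));
  cudaMalloc(&d_out, sizeof(h_out));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

  butterfly4<<<1, 32>>>(d_in, d_out);
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

  // Group {0,1,2,3} sums to 6, group {4,5,6,7} sums to 22, and so on.
  std::printf("group0 = %.0f, group1 = %.0f\n", h_out[0], h_out[4]);

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}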
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from shared memory in epilogue. /// /// Satisfies: ReadableTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type int MaxAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8 > class SharedLoadIterator { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::TileShape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kMinAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8; static int const kAlignment = (MaxAlignment < kMinAlignment ? 
MaxAlignment : kMinAlignment); static int const kThreads = ThreadMap::kThreads; /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray< Element, ThreadMap::kElementsPerAccess, kAlignment>; /// Vector type used for SMEM loads using LoadType = AlignedArray< Element, const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess), const_min(16, kAlignment) >; static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements; private: // // Data members // /// Byte-level pointer uint8_t *byte_pointer_; /// Stride along adjacent rows int stride_; public: // // Methods // /// Constructor CUTLASS_DEVICE SharedLoadIterator( TensorRef ref, int thread_idx ): byte_pointer_(reinterpret_cast<uint8_t *>(ref.data())), stride_((ref.stride(0) * sizeof_bits<Element>::value) / 8) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); // Initialize pointer byte_pointer_ += thread_offset.row() * stride_ + thread_offset.column() * sizeof(AccessType) / kElementsPerAccess; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_DEVICE void add_tile_offset(TensorCoord const &offset) { byte_pointer_ += offset.row() * Shape::kRow * stride_ + offset.column() * Shape::kColumn * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { uint8_t const *byte_pointer = byte_pointer_ + row * ThreadMap::Delta::kRow * stride_ + group * ThreadMap::Delta::kGroup* stride_ + cluster * ThreadMap::Delta::kCluster * stride_ + pointer_offset * sizeof_bits<Element>::value / 8; int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag); LoadType const *memory_pointer = reinterpret_cast<LoadType const *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kLoadsPerAccess; ++v) { frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * kLoadsPerAccess + v]; } } } } } } /// Loads a fragment from memory CUTLASS_DEVICE void set_smem_base_address(Index address) { } /// Loads a fragment CUTLASS_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
7,487
C
32.428571
112
0.637906
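SharedLoadIterator picks its access alignment as the smaller of the MaxAlignment template argument and the natural alignment of one access, and caps each shared-memory load vector at 128 bits, splitting an access into kLoadsPerAccess loads when needed. The constexpr sketch below reproduces that arithmetic for a hypothetical 16-bit element type with 8 elements per access; the numbers are illustrative only.

#include <cstdio>

constexpr int const_min(int a, int b) { return a < b ? a : b; }

// Hypothetical configuration: 16-bit output elements, 8 elements per access.
constexpr int kElementBits       = 16;
constexpr int kElementsPerAccess = 8;
constexpr int kMaxAlignment      = 16;  // stand-in for the MaxAlignment parameter

// Same alignment math as SharedLoadIterator.
constexpr int kMinAlignment = kElementsPerAccess * kElementBits / 8;    // 16 bytes
constexpr int kAlignment    = const_min(kMaxAlignment, kMinAlignment);  // 16 bytes

// Each SMEM load vector is capped at 128 bits.
constexpr int kLoadTypeElements = const_min(128 / kElementBits, kElementsPerAccess); // 8
constexpr int kLoadsPerAccess   = kElementsPerAccess / kLoadTypeElements;            // 1

int main() {
  std::printf("alignment=%d bytes, load_elems=%d, loads_per_access=%d\n",
              kAlignment, kLoadTypeElements, kLoadsPerAccess);
  return 0;
}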
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/epilogue_base_streamk.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Basic subset of epilogue functionality for supporting StreamK decompositions */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/functional.h" #include "cutlass/block_striped.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// StreamK epilogue functionality for cross-block accumulator fragment reduction template < typename Shape, ///< Shape of threadblock tile (concept: GemmShape) int PartitionsK, typename WarpMmaOperator, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) typename AccumulatorFragmentIterator> ///< Iterator for enumerating fragments within the per-thread tile of raw accumulators class EpilogueBaseStreamK { protected: /// The per-thread tile of raw accumulators using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Number of warps using WarpCount = gemm::GemmShape< Shape::kM / WarpMmaOperator::Shape::kM, Shape::kN / WarpMmaOperator::Shape::kN, PartitionsK>; /// Number of threads per block static int const kBlockThreads = 32 * WarpCount::kCount; /// Numerical accumulation element type using ElementAccumulator = typename WarpMmaOperator::ElementC; /// Fragment type used by the accumulator tile's fragment iterator using AccumulatorFragment = typename AccumulatorFragmentIterator::Fragment; public: /// Number of AccumulatorTile fragments per thread static int const kAccumulatorFragments = AccumulatorFragmentIterator::Policy::kIterations; protected: /// Number of AccumulatorTile fragments per block output tile static int const kOutputTileFragments = kBlockThreads * kAccumulatorFragments; /// Block-striped transfer utility for sharing AccumulatorFragment using BlockStripedT = BlockStriped<kBlockThreads, AccumulatorFragment>; /// AccumulatorFragment stride in the shared workspace between different peer blocks (each thread block can share accumulators for up to two block output tiles) static const int kPeerFragmentStride = kOutputTileFragments * 2; public: /// Workspace bytes per thread block static size_t const kWorkspaceBytesPerBlock =sizeof(AccumulatorFragment) * kPeerFragmentStride; public: /// Thread index in the threadblock int thread_idx; public: /// Constructor CUTLASS_DEVICE EpilogueBaseStreamK( int thread_idx) ///< ID of a thread within the threadblock : thread_idx(thread_idx) {} /// Aggregates the accumulator sets shared by peer blocks in the global workspace CUTLASS_DEVICE void reduce( AccumulatorFragment &accum_fragment, ///< [out] sum of all shared accumulator fragments for these peer partials int peer_idx_begin, int peer_idx_end, int reduce_fragment_idx, void *workspace_ptr) { plus<AccumulatorFragment> add_fragments; AccumulatorFragment *fragment_workspace = reinterpret_cast<AccumulatorFragment *>(workspace_ptr); int fragment_offset = (peer_idx_begin * kPeerFragmentStride) + (reduce_fragment_idx * kBlockThreads); // Load first peer fragment BlockStripedT::load(accum_fragment, fragment_workspace + fragment_offset, this->thread_idx); fragment_offset += kPeerFragmentStride; // Move to next peer fragment_offset += kOutputTileFragments; // Move to the set of fragments for this peer's "non-started" output tile // Reduce fragments from additional peers #pragma unroll 2 for (; fragment_offset < peer_idx_end * kPeerFragmentStride; fragment_offset += kPeerFragmentStride) { // Load peer fragment AccumulatorFragment addend_fragment; 
BlockStripedT::load(addend_fragment, fragment_workspace + fragment_offset, this->thread_idx); // Add peer fragment accum_fragment = add_fragments(accum_fragment, addend_fragment); } } /// Shares the accumulator set with peers in the global workspace CUTLASS_DEVICE void share( int peer_idx, void *workspace_ptr, AccumulatorTile const &accumulators, bool started_tile) ///< Whether this thread block computed the first work volume for the current output tile { AccumulatorFragment *fragment_workspace = reinterpret_cast<AccumulatorFragment *>(workspace_ptr); int fragment_offset = peer_idx * kPeerFragmentStride; if (!started_tile) { // Move to the set of fragments for the "non-started" output tile fragment_offset += kOutputTileFragments; } AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // Convert raw accumulator tile to fragments and store CUTLASS_PRAGMA_UNROLL for (int iter = 0; iter < kAccumulatorFragments; ++iter) { // Acquire reordered accumulator fragment AccumulatorFragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; // Store accumulator fragment BlockStripedT::store(fragment_workspace + fragment_offset, accum_fragment, this->thread_idx); fragment_offset += kBlockThreads; } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
7,455
C
36.656565
162
0.674983
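EpilogueBaseStreamK sizes its global workspace from the number of block threads and accumulator fragments: each peer block reserves room for two output tiles' worth of fragments, and reduce() walks the workspace in strides of that per-peer footprint. The sketch below reproduces the layout arithmetic with hypothetical sizes; the fragment byte count and the helper first_fragment_offset are illustrative, not part of the CUTLASS API.

#include <cstdio>

// Hypothetical sizes: a 256-thread block, 8 accumulator fragments per thread,
// 32 floats per fragment.
constexpr int kBlockThreads         = 256;
constexpr int kAccumulatorFragments = 8;
constexpr int kFragmentBytes        = 32 * int(sizeof(float));

// Same layout arithmetic as EpilogueBaseStreamK.
constexpr int kOutputTileFragments = kBlockThreads * kAccumulatorFragments;
// Each block may share accumulators for up to two output tiles, hence the x2.
constexpr int kPeerFragmentStride  = kOutputTileFragments * 2;
constexpr size_t kWorkspaceBytesPerBlock =
    size_t(kFragmentBytes) * kPeerFragmentStride;

// Offset (in fragments) of the first fragment read when reducing a given
// fragment index against the partials published by peer block `peer_idx`.
constexpr int first_fragment_offset(int peer_idx, int reduce_fragment_idx) {
  return peer_idx * kPeerFragmentStride + reduce_fragment_idx * kBlockThreads;
}

int main() {
  std::printf("workspace per block = %zu bytes\n", kWorkspaceBytesPerBlock);
  std::printf("offset(peer=3, frag=2) = %d fragments\n",
              first_fragment_offset(3, 2));
  return 0;
}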
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_planar_complex.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Constructs a default epilogue for planar complex outputs. This template reuses components for real-valued epilogues and applies them to planar complex output matrices. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/array_planar_complex.h" #include "cutlass/arch/arch.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/epilogue_planar_complex.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues. template < typename ThreadblockShape_, typename WarpMma_, typename OpcodeClass_, typename ArchTag_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpiloguePlanarComplex; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues. 
template < typename ThreadblockShape_, typename WarpMmaOperator_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpiloguePlanarComplex< ThreadblockShape_, WarpMmaOperator_, arch::OpClassTensorOp, arch::Sm70, PartitionsK, OutputOp_, ElementsPerAccess> { using RealEpilogue = DefaultEpilogueVoltaTensorOp< ThreadblockShape_, WarpMmaOperator_, PartitionsK, OutputOp_, ElementsPerAccess >; using Epilogue = EpiloguePlanarComplex< ThreadblockShape_, WarpMmaOperator_, PartitionsK, typename RealEpilogue::OutputTileIterator, typename RealEpilogue::AccumulatorFragmentIterator, typename RealEpilogue::WarpTileIterator, typename RealEpilogue::SharedLoadIterator, OutputOp_, typename RealEpilogue::Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues. template < typename ThreadblockShape_, typename WarpMmaOperator_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpiloguePlanarComplex< ThreadblockShape_, WarpMmaOperator_, arch::OpClassTensorOp, arch::Sm75, PartitionsK, OutputOp_, ElementsPerAccess> { using RealEpilogue = DefaultEpilogueTensorOp< ThreadblockShape_, WarpMmaOperator_, PartitionsK, OutputOp_, ElementsPerAccess >; using Epilogue = EpiloguePlanarComplex< ThreadblockShape_, WarpMmaOperator_, PartitionsK, typename RealEpilogue::OutputTileIterator, typename RealEpilogue::AccumulatorFragmentIterator, typename RealEpilogue::WarpTileIterator, typename RealEpilogue::SharedLoadIterator, OutputOp_, typename RealEpilogue::Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues. template < typename ThreadblockShape_, typename WarpMmaOperator_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpiloguePlanarComplex< ThreadblockShape_, WarpMmaOperator_, arch::OpClassTensorOp, arch::Sm80, PartitionsK, OutputOp_, ElementsPerAccess> { using RealEpilogue = DefaultEpilogueTensorOp< ThreadblockShape_, WarpMmaOperator_, PartitionsK, OutputOp_, ElementsPerAccess >; using Epilogue = EpiloguePlanarComplex< ThreadblockShape_, WarpMmaOperator_, PartitionsK, typename RealEpilogue::OutputTileIterator, typename RealEpilogue::AccumulatorFragmentIterator, typename RealEpilogue::WarpTileIterator, typename RealEpilogue::SharedLoadIterator, OutputOp_, typename RealEpilogue::Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues. 
template < typename ThreadblockShape_, typename WarpMmaOperator_, typename ArchTag_, int PartitionsK, typename OutputOp_, int ElementsPerAccess > struct DefaultEpiloguePlanarComplex< ThreadblockShape_, WarpMmaOperator_, arch::OpClassSimt, ArchTag_, PartitionsK, OutputOp_, ElementsPerAccess> { using RealEpilogue = DefaultEpilogueSimt< ThreadblockShape_, WarpMmaOperator_, OutputOp_, ElementsPerAccess >; using Epilogue = EpiloguePlanarComplex< ThreadblockShape_, WarpMmaOperator_, PartitionsK, typename RealEpilogue::OutputTileIterator, typename RealEpilogue::AccumulatorFragmentIterator, typename RealEpilogue::WarpTileIterator, typename RealEpilogue::SharedLoadIterator, OutputOp_, typename RealEpilogue::Padding >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
7,209
C
28.793388
100
0.654876
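default_epilogue_planar_complex.h declares a primary DefaultEpiloguePlanarComplex template and then selects the underlying real-valued epilogue through partial specializations on the opcode class and architecture tag. The sketch below shows the selection idea in miniature using full specializations and hypothetical tag and epilogue types; the real template additionally forwards the threadblock shape, warp operator, and output-op parameters.

#include <type_traits>

// Hypothetical architecture tags and real-valued "default" epilogues.
struct Sm70 {};
struct Sm80 {};
struct VoltaEpilogue {};
struct AmpereEpilogue {};

// Primary template is only declared; specializations select the real-valued
// epilogue per architecture, mirroring DefaultEpiloguePlanarComplex.
template <typename ArchTag> struct DefaultPlanarComplex;

template <> struct DefaultPlanarComplex<Sm70> {
  using RealEpilogue = VoltaEpilogue;
};

template <> struct DefaultPlanarComplex<Sm80> {
  using RealEpilogue = AmpereEpilogue;
};

static_assert(std::is_same<DefaultPlanarComplex<Sm70>::RealEpilogue,
                           VoltaEpilogue>::value, "Sm70 picks the Volta path");
static_assert(std::is_same<DefaultPlanarComplex<Sm80>::RealEpilogue,
                           AmpereEpilogue>::value, "Sm80 picks the Ampere path");

int main() { return 0; }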
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/permute.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type bool ScatterD = false, ///< Scatter D operand or not typename PermuteDLayout = layout::NoPermute, ///< Permute D operand or not bool UseCUDAStore = false > class PredicatedTileIterator { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Count::kTile; static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0"); static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0"); static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0"); static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0"); /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; // // Parameters struct // /// Uses a non-template class struct Params : PredicatedTileIteratorParams { using Base = PredicatedTileIteratorParams; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout): PredicatedTileIteratorParams( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, make_OutputTileThreadMapDesc<ThreadMap>() ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kColumn; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. PredicatedTileIteratorParams params_; /// Byte-level pointer. This pointer is usually for both load() and store(), unless PermuteD is performed. When having PermuteD, byte_pointer_ is only for load(). uint8_t *byte_pointer_; /// Byte-level pointer for store(). Due to PermuteD Op, store_byte_pointer_ may be with different address computation compared to byte_pointer_. 
uint8_t *store_byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// Extent of the matrix tile in rows Index extent_column_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// A thread's starting column Index thread_start_column_; /// Internal state counter int state_[3]; /// Scatter indices int const *indices_; /// Whether to perform Permute Op bool PermuteD; /// PermuteDLayout mutable PermuteDLayout permute_layout_; // // Static asserts about internal strides // static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIterator( PredicatedTileIteratorParams const & params, Element *pointer, TensorCoord extent, int thread_idx, TensorCoord threadblock_offset = TensorCoord(), int const *indices = nullptr ): params_(params), indices_(indices) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; extent_row_ = extent.row(); extent_column_ = extent.column(); thread_start_row_ = thread_offset.row(); thread_start_column_ = thread_offset.column(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { mask_.predicates[c] = ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column()); } // Null pointer performs no accesses if (!pointer) { mask_.clear(); } if (ScatterD && !indices) { mask_.clear(); } // Initialize byte_pointer_ byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + LongIndex(thread_offset.row()) * LongIndex(params_.stride) + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; if (ScatterD) { byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; } // store_byte_pointer_ is set to be the same with byte_pointer_ unless PermuteD is used. store_byte_pointer_ = byte_pointer_; // Initialize PermuteD. If PermuteD is true, store_byte_pointer_ is initialized accordingly. 
if (platform::is_same<PermuteDLayout, layout::NoPermute>::value) { PermuteD = false; }else{ PermuteD = true; store_byte_pointer_ = reinterpret_cast<uint8_t *>(pointer); permute_layout_ = PermuteDLayout(extent, params_.stride * kElementsPerAccess / sizeof(AccessType)); } // Initialize internal state counter state_[0] = state_[1] = state_[2] = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { store_byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); if (ScatterD && row_guard) { assert(indices_); memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset + LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride)); } CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { if (!ScatterD) { byte_pointer += params_.increment_row; } } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const { uint8_t *byte_pointer = store_byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); if (ScatterD && row_guard) { assert(indices_); memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset + LongIndex(indices_[row_offset + 
thread_start_row_]) * LongIndex(params_.stride)); } CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; int col_offset = column * ThreadMap::Delta::kColumn; if (PermuteD) { int col = col_offset + thread_start_column_; int row = row_offset + thread_start_row_; TensorCoord init_coord(row, col); // Locate memory_pointer memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset + permute_layout_(init_coord) * sizeof(AccessType) / kElementsPerAccess); } if (UseCUDAStore) { if (guard) { memory_pointer[0] = frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column]; } } else { cutlass::arch::global_store<AccessType, sizeof(AccessType)>( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[0], guard); } if (!PermuteD) { memory_pointer += (ThreadMap::Delta::kColumn / kElementsPerAccess); } } if (row + 1 < ThreadMap::Iterations::kRow) { if (!ScatterD && !PermuteD) { byte_pointer += params_.increment_row; } } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } /// Loads a fragment from memory CUTLASS_DEVICE void downsample_load_with_byte_offset(Fragment &frag, int64_t byte_offset, int convolution_P, int convolution_Q, int add_P, int add_Q, int problem_N) const { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); int output_row = row_offset + thread_start_row_; int output_N = output_row / (convolution_P * convolution_Q); int output_PQ = output_row % (convolution_P * convolution_Q); int output_P = output_PQ / convolution_Q; int output_Q = output_PQ % convolution_Q; int input_row = output_N * 2 * convolution_P * 2 * convolution_Q + (2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q + add_Q; int64_t byte_offset = (input_row-output_row)*problem_N*sizeof(float); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } /// Loads a fragment from memory CUTLASS_DEVICE void 
upsample_load_with_byte_offset(Fragment &frag, int64_t byte_offset, int convolution_P, int convolution_Q, int add_P, int add_Q, int problem_N) const { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + thread_start_row_) < extent_row_); int output_row = row_offset + thread_start_row_; int output_N = output_row / (convolution_P * convolution_Q); int output_PQ = output_row % (convolution_P * convolution_Q); int output_P = output_PQ / convolution_Q; int output_Q = output_PQ % convolution_Q; int row_add_P = add_P; int row_add_Q = add_Q; if (output_P > convolution_P - 2) row_add_P = 0; if (output_Q > convolution_Q - 2) row_add_Q = 0; int input_row = output_N * (convolution_P/2) * (convolution_Q/2) + ((output_P + row_add_P)/2) * (convolution_Q/2) + (output_Q + row_add_Q)/2; int64_t byte_offset = (input_row-output_row)*problem_N*sizeof(float); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask_.predicates[column]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], (void *)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess], guard); } if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; } } if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; } } if (cluster + 1 < ThreadMap::Iterations::kCluster) { byte_pointer += params_.increment_cluster; } } } CUTLASS_DEVICE MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_row() const { return thread_start_row_; } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_column() const { return thread_start_column_; } /// Extent of the matrix in rows CUTLASS_DEVICE Index extent_row() const { return extent_row_; } /// Extent of the matrix in columns CUTLASS_DEVICE Index extent_column() const { return extent_column_; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIterator &operator++() { ++state_[0]; if (!ScatterD && !PermuteD) { store_byte_pointer_ += params_.advance_row; } if (!ScatterD) { byte_pointer_ += params_.advance_row; } thread_start_row_ += ThreadMap::Shape::kRow; if (state_[0] == ThreadMap::Count::kRow) { state_[0] = 0; ++state_[1]; byte_pointer_ += params_.advance_group; store_byte_pointer_ += params_.advance_group; thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; if (state_[1] == ThreadMap::Count::kGroup) { state_[1] = 0; ++state_[2]; byte_pointer_ += params_.advance_cluster; store_byte_pointer_ += params_.advance_cluster; thread_start_row_ += ThreadMap::Count::kGroup * 
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow; if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; byte_pointer_ += params_.advance_tile; store_byte_pointer_ += params_.advance_tile; thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow * ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile; } } } return *this; } /// Advances a number of positions to load or store CUTLASS_HOST_DEVICE PredicatedTileIterator &operator+=(int increment) { // Row state_[0] += increment; int increment_row = state_[0] / ThreadMap::Count::kRow; state_[0] = state_[0] % ThreadMap::Count::kRow; byte_pointer_ += (params_.advance_row * increment); store_byte_pointer_ += (params_.advance_row * increment); thread_start_row_ += (ThreadMap::Shape::kRow * increment); // Group state_[1] += increment_row; int increment_group = state_[1] / ThreadMap::Count::kGroup; state_[1] = state_[1] % ThreadMap::Count::kGroup; byte_pointer_ += (params_.advance_group * increment_row); store_byte_pointer_ += (params_.advance_group * increment_row); thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow * increment_row; // Cluster state_[2] += increment_group; int increment_cluster = state_[2] / ThreadMap::Count::kCluster; state_[2] = state_[2] % ThreadMap::Count::kCluster; byte_pointer_ += (params_.advance_cluster * increment_group); store_byte_pointer_ += (params_.advance_cluster * increment_group); thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow * increment_group; // Tile byte_pointer_ += (params_.advance_tile * increment_cluster); store_byte_pointer_ += (params_.advance_tile * increment_cluster); thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow * ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile * increment_cluster; return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) const { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | InterleavedPredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type int InterleavedN ///< Number of Interleaved N > class InterleavedPredicatedTileIterator { public: using ThreadMap = ThreadMap_; using Element = Element_; using Layout = layout::ColumnMajorInterleaved<InterleavedN>; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = layout::PitchLinearCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Iterations::kCount; /// Fragment object using Fragment = Array<Element, ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; /// Uses a non-template class struct Params : InterleavedPredicatedTileIteratorParams { using Base = InterleavedPredicatedTileIteratorParams; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout): Base( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, make_InterleavedPredicatedTileIteratorDesc<Element, ThreadMap>() ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; /// Mask object struct Mask { static int const kCount = (ThreadMap::Iterations::kContiguous < 8) ? 8 : ThreadMap::Iterations::kContiguous; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
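///
/// Addressing sketch (informal, inferred from the constructor and load()/store()
/// below): the precomputed stride held in Params is expressed in bytes -- the
/// layout's element stride scaled by sizeof(AccessType) / kElementsPerAccess,
/// which for a packed AccessType is typically just sizeof(Element).  The
/// constructor can therefore position byte_pointer_ with plain integer
/// arithmetic, roughly:
///
///   byte_pointer_ = base
///                 + strided_offset    * params_.stride                            // bytes per strided step
///                 + contiguous_offset * sizeof(AccessType) / kElementsPerAccess;  // bytes within the interleaved column
///
/// load() / store() then combine the contiguous-dimension predicates computed at
/// construction with a runtime extent check along the strided dimension before
/// each guarded global access.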
Params params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in columns Index extent_col_; /// A thread's starting column position (assuming steady-state predicates have /// been computed) Index thread_start_col_; /// Internal iteration counter int iteration_contiguous_; int iteration_strided_; private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE InterleavedPredicatedTileIterator( Params const & params, Element *pointer, TensorCoord extent, int thread_idx, TensorCoord threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ): params_(params) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + TensorCoord(threadblock_offset.contiguous() * InterleavedN, threadblock_offset.strided() / InterleavedN); extent_col_ = extent.strided() / InterleavedN; thread_start_col_ = thread_offset.strided(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { mask_.predicates[c] = ((thread_offset.contiguous() + ThreadMap::Delta::kContiguous * c) < (extent.contiguous() * InterleavedN)); } // Initialize pointer byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + LongIndex(thread_offset.strided()) * LongIndex(params_.stride) + LongIndex(thread_offset.contiguous()) * sizeof(AccessType) / kElementsPerAccess; // Initialize internal state counter iteration_contiguous_ = iteration_strided_ = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { uint8_t *byte_pointer = byte_pointer_; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer); int col_offset = iteration_strided_ * ThreadMap::Delta::kStrided; bool col_guard = ((thread_start_col_ + col_offset) < extent_col_); bool guard = col_guard && mask_.predicates[iteration_contiguous_]; cutlass::arch::global_load< AccessType, sizeof(AccessType) >( *frag_ptr, (void *)memory_pointer, guard); } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { uint8_t *byte_pointer = byte_pointer_; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer); int col_offset = iteration_strided_ * ThreadMap::Delta::kStrided; bool col_guard = ((thread_start_col_ + col_offset) < extent_col_); bool guard = col_guard && mask_.predicates[iteration_contiguous_]; cutlass::arch::global_store<AccessType, sizeof(AccessType)>( *frag_ptr, (void *)memory_pointer, guard); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int iteration) { iteration_contiguous_ = iteration % ThreadMap::Iterations::kContiguous; iteration_strided_ = iteration / ThreadMap::Iterations::kContiguous; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE InterleavedPredicatedTileIterator &operator++() { ++iteration_contiguous_; byte_pointer_ += params_.advance_row; if (iteration_contiguous_ == ThreadMap::Iterations::kContiguous) { iteration_contiguous_ = 0; ++iteration_strided_; byte_pointer_ += params_.advance_column; if (iteration_strided_ == ThreadMap::Iterations::kStrided) { 
iteration_strided_ = 0; } } return *this; } /// Advances a number of positions to load or store CUTLASS_HOST_DEVICE InterleavedPredicatedTileIterator &operator+=(int increment) { // Contiguous iteration_contiguous_ += increment; int increment_strided = iteration_contiguous_ / ThreadMap::Iterations::kContiguous; iteration_contiguous_ = iteration_contiguous_ % ThreadMap::Iterations::kContiguous; byte_pointer_ += (params_.advance_row * increment); // Strided iteration_strided_ += increment_strided; byte_pointer_ += (params_.advance_column * increment_strided); return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from global memory in epilogue. /// /// Satisfies: ReadableTileIterator | InterleavedMaskedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type int InterleavedN ///< Number of Interleaved N > class InterleavedConvPredicatedTileIterator { public: using ThreadMap = ThreadMap_; using Element = Element_; using Layout = layout::TensorNCxHWx<InterleavedN>; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = Tensor4DCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; static int const kIterations = ThreadMap::Iterations::kCount; /// Fragment object using Fragment = Array<Element, ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; // // Parameters struct // struct Params { // // Data members // LongIndex stride_col; ///< stride in bytes between columns LongIndex stride_row; ///< stride in bytes between rows // // Methods // CUTLASS_HOST_DEVICE Status initialize(typename Layout::Stride stride_) { stride_col = stride_[1]; stride_row = stride_[2]; return Status::kSuccess; } CUTLASS_HOST_DEVICE Params() { initialize(cutlass::make_Coord(0, 0, 0)); } CUTLASS_HOST_DEVICE Params(Layout const &layout) { initialize(layout.stride()); } }; /// Mask object struct Mask { static int const kCount = (ThreadMap::Iterations::kRow < 8) ? 8 : ThreadMap::Iterations::kRow; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
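///
/// Addressing sketch (informal, mirroring the constructor and load()/store()
/// below): each GEMM output row corresponds to an (n, p*q) position of the
/// TensorNCxHWx output.  The constructor precomputes magic numbers for a fast
/// integer division by extent_pq_ = H * W (find_divisor -> pq_mul_, pq_shr_),
/// so load() / store() can recover
///
///   n      = row / (H * W)
///   pq_rem = row % (H * W)
///
/// with multiplies and shifts, and then address the element at byte offset
///
///   (n * stride_row + pq_rem * InterleavedN) * sizeof_bits<Element>::value / 8
///
/// relative to byte_pointer_, which already accounts for the channel offset.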
Params params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in columns Index extent_col_; /// Extent of the matrix tile in rows Index extent_row_; /// Extent of the matrix tile in pq Index extent_pq_; /// A thread's starting row position (assuming steady-state predicates have /// been computed) Index thread_start_row_; /// A thread's starting column position (assuming steady-state predicates have /// been computed) Index thread_start_col_; /// Internal iteration counter LongIndex iteration_row_; LongIndex iteration_col_; uint32_t pq_mul_; uint32_t pq_shr_; private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE InterleavedConvPredicatedTileIterator( Params const & params, Element *pointer, TensorCoord extent, int thread_idx, MatrixCoord threadblock_offset ): params_(params) { MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; extent_col_ = extent.c(); extent_pq_ = extent.h() * extent.w(); extent_row_ = extent.n() * extent_pq_; find_divisor(pq_mul_, pq_shr_, extent_pq_); thread_start_row_ = thread_offset.row(); thread_start_col_ = thread_offset.column(); // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int r = 0; r < ThreadMap::Iterations::kRow; ++r) { mask_.predicates[r] = ((thread_offset.row() + ThreadMap::Delta::kRow * r) < extent_row_); } // Initialize pointer byte_pointer_ = reinterpret_cast<uint8_t *>(pointer) + ((thread_start_col_ / InterleavedN) * params_.stride_col + (thread_start_col_ % InterleavedN)) * sizeof_bits<Element>::value / 8; // Initialize internal state counter iteration_row_ = iteration_col_ = 0; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { int col_offset = iteration_col_ * ThreadMap::Delta::kColumn; bool col_guard = ((thread_start_col_ + col_offset) < extent_col_); bool guard = col_guard && mask_.predicates[iteration_row_]; int n, pq_rem; fast_divmod(n, pq_rem, thread_start_row_ + iteration_row_ * ThreadMap::Delta::kRow, extent_pq_, pq_mul_, pq_shr_); uint8_t *byte_pointer = byte_pointer_ + (n * params_.stride_row + pq_rem * InterleavedN) * sizeof_bits<Element>::value / 8; AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); AccessType const *memory_pointer = reinterpret_cast<AccessType const *>(byte_pointer); cutlass::arch::global_load< AccessType, sizeof(AccessType) >( *frag_ptr, (void *)memory_pointer, guard); } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { int col_offset = iteration_col_ * ThreadMap::Delta::kColumn; bool col_guard = ((thread_start_col_ + col_offset) < extent_col_); bool guard = col_guard && mask_.predicates[iteration_row_]; int n, pq_rem; fast_divmod(n, pq_rem, thread_start_row_ + iteration_row_ * ThreadMap::Delta::kRow, extent_pq_, pq_mul_, pq_shr_); uint8_t *byte_pointer = byte_pointer_ + (n * params_.stride_row + pq_rem * InterleavedN) * sizeof_bits<Element>::value / 8; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer); cutlass::arch::global_store<AccessType, sizeof(AccessType)>( *frag_ptr, (void *)memory_pointer, guard); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int 
iteration) {
    iteration_row_ = iteration % ThreadMap::Iterations::kRow;
    iteration_col_ = iteration / ThreadMap::Iterations::kRow;
  }

  /// Advances to the next position to load or store
  CUTLASS_HOST_DEVICE
  InterleavedConvPredicatedTileIterator &operator++() {
    ++iteration_row_;

    if (iteration_row_ == ThreadMap::Iterations::kRow) {
      iteration_row_ = 0;
      ++iteration_col_;
      byte_pointer_ += params_.stride_col;

      if (iteration_col_ == ThreadMap::Iterations::kColumn) {
        iteration_col_ = 0;
      }
    }

    return *this;
  }

  ///< Efficiently disables all accesses guarded by mask
  CUTLASS_DEVICE void clear_mask() {
    mask_.clear();
  }

  ///< Efficiently enables all accesses guarded by mask
  CUTLASS_DEVICE void enable_mask() {
    mask_.enable();
  }

  ///< Gets the mask
  CUTLASS_DEVICE void get_mask(Mask &mask) {
    mask = mask_;
  }

  ///< Sets the mask
  CUTLASS_DEVICE void set_mask(Mask const &mask) {
    mask_ = mask;
  }
};

///////////////////////////////////////////////////////////////////////////////

} // namespace threadblock
} // namespace epilogue
} // namespace cutlass

////////////////////////////////////////////////////////////////////////////////
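// ---------------------------------------------------------------------------
// Illustrative sketch (a standalone example under assumed names, not part of
// the CUTLASS sources above): PredicatedTileIterator::operator++ / operator+=
// walk the output tile as a mixed-radix counter over (row, group, cluster),
// applying the precomputed advance_row / advance_group / advance_cluster /
// advance_tile byte offsets whenever a digit wraps.  The free function below
// mirrors that carry chain with hypothetical counts standing in for
// ThreadMap::Count; values and names are assumptions for illustration only.
// ---------------------------------------------------------------------------

namespace predicated_tile_iterator_sketch {

// Hypothetical counts; the real values come from the epilogue's OutputTileThreadMap.
constexpr int kCountRow     = 4;
constexpr int kCountGroup   = 2;
constexpr int kCountCluster = 2;

struct State {
  int row     = 0;  // corresponds to state_[0]
  int group   = 0;  // corresponds to state_[1]
  int cluster = 0;  // corresponds to state_[2]
  int tile    = 0;  // number of times advance_tile would have been applied
};

// Mirrors operator+=(int increment): add to the row digit, then propagate
// carries upward; each carry counts one application of the corresponding
// advance_* pointer increment in the real iterator.
inline State advance(State s, int increment) {
  s.row += increment;
  int carry_group = s.row / kCountRow;
  s.row %= kCountRow;

  s.group += carry_group;
  int carry_cluster = s.group / kCountGroup;
  s.group %= kCountGroup;

  s.cluster += carry_cluster;
  int carry_tile = s.cluster / kCountCluster;
  s.cluster %= kCountCluster;

  s.tile += carry_tile;
  return s;
}

} // namespace predicated_tile_iterator_sketch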
40,870
C
29.23003
164
0.618106
NVIDIA/warp/warp/native/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/permute.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" #include "cutlass/conv/conv2d_problem_size.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. 
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_,       ///< Thread map (concept: PitchLinearThreadMap)
  typename Element_,         ///< Element data type
  typename ThreadOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>,
  typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>
>
class PredicatedTileIteratorDirectConv {
public:
  using ThreadMap = ThreadMap_;
  using Shape = typename ThreadMap::Shape;
  using ThreadOutputShape = ThreadOutputShape_;
  using ThreadBlockOutputShape = ThreadBlockOutputShape_;

  using Element = Element_;

  using Layout = layout::RowMajor;
  using TensorRef = TensorRef<Element, Layout>;
  using ConstTensorRef = typename TensorRef::ConstTensorRef;

  using Index = typename Layout::Index;
  using LongIndex = typename Layout::LongIndex;
  using TensorCoord = MatrixCoord;

  static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static int const kThreads = ThreadMap::kThreads;

  using ConvProblemSize = typename cutlass::conv::Conv2dProblemSize;

  /// Fragment object
  using Fragment = Array<Element, ThreadMap::Iterations::kCount * kElementsPerAccess>;

  /// Memory access size
  using AccessType = AlignedArray<Element, kElementsPerAccess>;

  static int const kLoadsPerAccess = AccessType::kElements / AccessType::kElements;

  using ThreadTileCount = MatrixShape<
    ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
    ThreadBlockOutputShape::kW / ThreadOutputShape::kW
  >;

  //
  // Parameters struct
  //

  /// Uses a non-template class
  struct Params : PredicatedTileIteratorDirect2dConvParams {
    using Base = PredicatedTileIteratorDirect2dConvParams;

    CUTLASS_HOST_DEVICE
    Params() { }

    CUTLASS_HOST_DEVICE
    Params(Layout const &layout, cutlass::conv::Conv2dProblemSize const &problem_size):
      PredicatedTileIteratorDirect2dConvParams(
        layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
        problem_size,
        {ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW}
      )
    { }

    CUTLASS_HOST_DEVICE
    Params(Base const &base) :
      Base(base) { }
  };

  /// Mask object
  struct Mask {

    static int const kCount = ThreadMap::Iterations::kContiguous;

    /// Predicate state
    bool predicates[kCount];

    //
    // Mask
    //
    CUTLASS_HOST_DEVICE
    Mask() {
      enable();
    }

    ///< Efficiently disables all accesses guarded by mask
    CUTLASS_HOST_DEVICE void clear() {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = false;
      }
    }

    ///< Efficiently enables all accesses guarded by mask
    CUTLASS_DEVICE void enable() {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = true;
      }
    }
  };

private:

  //
  // Data members
  //

  /// Parameters structure containing reference and precomputed state.
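  ///
  /// Tile indexing sketch (informal; the exact divisor definitions live in
  /// PredicatedTileIteratorDirect2dConvParams): set_tile_index() below splits a
  /// linear threadblock tile index into an output position using the fast
  /// divisions precomputed in Params, conceptually
  ///
  ///   n        = tile_index / (tiles along P * tiles along Q)   // pq_divmod
  ///   residual = tile_index % (tiles along P * tiles along Q)
  ///   p_tile   = residual / (tiles along Q)                     // q_divmod
  ///   q_tile   = residual % (tiles along Q)
  ///
  /// and then scales (p_tile, q_tile) by ThreadBlockOutputShape::{kH, kW} to
  /// obtain the base (thread_start_p_, thread_start_q_) of this threadblock's
  /// output tile.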
PredicatedTileIteratorDirect2dConvParams params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Element *pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// Extent of the matrix tile in columns Index extent_column_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// A thread's starting column Index thread_start_column_; /// Initial thread output location int thread_start_n_, thread_start_p_, thread_start_q_; /// Current threadblock tile index int tile_index_; // // Static asserts about internal strides // static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); static_assert(sizeof(PredicatedTileIteratorDirect2dConvParams::stride) == 8, "Expected 64b strides"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIteratorDirectConv( PredicatedTileIteratorDirect2dConvParams const & params, Element *pointer, TensorCoord extent, int thread_idx, TensorCoord threadblock_offset = TensorCoord() ): params_(params), pointer_(pointer) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); extent_row_ = extent.row(); extent_column_ = extent.column(); // stride dim (PQ) thread_start_row_ = thread_offset.column(); // contiguous dim (Channels) thread_start_column_ = threadblock_offset.column() + thread_offset.row(); tile_index_ = threadblock_offset.row(); set_tile_index(0); } /// Sets the threadblock tile index and recomputes predicates CUTLASS_HOST_DEVICE void set_tile_index(const int index) { int residual; params_.pq_divmod(thread_start_n_, residual, tile_index_ + index); params_.q_divmod(thread_start_p_, thread_start_q_, residual); // Compute the base output coord of ThreadBlock thread_start_p_ *= ThreadBlockOutputShape::kH; thread_start_q_ *= ThreadBlockOutputShape::kW; // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { mask_.predicates[c] = ((thread_start_column_ + c * ThreadMap::Delta::kContiguous) < extent_column_); } // Null pointer performs no accesses if (!pointer_) { mask_.clear(); } } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c; int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided; int p = current_row / ThreadBlockOutputShape::kW; int q = current_row % ThreadBlockOutputShape::kW; int current_p = thread_start_p_ + p; int current_q = thread_start_q_ + q; bool row_guard = (current_p) < params_.P && (current_q) < params_.Q && (thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided; int output_row_offset = thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q; uint8_t *byte_pointer = reinterpret_cast<uint8_t *>(pointer_) + LongIndex(output_row_offset) * LongIndex(params_.stride) + LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) * sizeof(AccessType) / kElementsPerAccess; AccessType *frag_ptr = 
reinterpret_cast<AccessType *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); bool guard = row_guard && mask_.predicates[c]; cutlass::arch::global_load<AccessType, sizeof(AccessType)>( frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard); } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c; int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided; int p = current_row / ThreadBlockOutputShape::kW; int q = current_row % ThreadBlockOutputShape::kW; int current_p = thread_start_p_ + p; int current_q = thread_start_q_ + q; bool row_guard = (current_p) < params_.P && (current_q) < params_.Q && (thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided; int output_row_offset = thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q; uint8_t *byte_pointer = reinterpret_cast<uint8_t *>(pointer_) + LongIndex(output_row_offset) * LongIndex(params_.stride) + LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) * sizeof(AccessType) / kElementsPerAccess; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); bool guard = row_guard && mask_.predicates[c]; cutlass::arch::global_store<AccessType, sizeof(AccessType)>( frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard); } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } CUTLASS_DEVICE MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_row() const { return thread_start_row_; } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_column() const { return thread_start_column_; } /// Extent of the matrix in rows CUTLASS_DEVICE Index extent_row() const { return extent_row_; } /// Extent of the matrix in columns CUTLASS_DEVICE Index extent_column() const { return extent_column_; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIteratorDirectConv &operator++() { // do nothing return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) const { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
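// ---------------------------------------------------------------------------
// Illustrative sketch (a standalone example under assumed names, not part of
// the CUTLASS sources above): load_with_byte_offset() / store_with_byte_offset()
// turn a row index inside the threadblock output tile into a global (n, p, q)
// output coordinate and guard it against the problem extents.  The helper below
// mirrors that index math with plain ints; the real code additionally checks the
// row against ThreadMap::Shape::kStrided.
// ---------------------------------------------------------------------------

namespace direct_conv_epilogue_sketch {

struct Extents { int N, P, Q; };   // stands in for params_.N / params_.P / params_.Q

struct OutputCoord {
  int n, p, q;
  bool in_bounds;
};

// tile_w stands in for ThreadBlockOutputShape::kW; (n, p0, q0) is the
// threadblock's base output position as computed by set_tile_index().
inline OutputCoord map_row(int row_in_tile, int tile_w,
                           int n, int p0, int q0, Extents ext) {
  OutputCoord c;
  c.n = n;
  c.p = p0 + row_in_tile / tile_w;   // row within tile -> local (p, q)
  c.q = q0 + row_in_tile % tile_w;
  c.in_bounds = (c.p < ext.P) && (c.q < ext.Q) && (c.n < ext.N);
  return c;
}

} // namespace direct_conv_epilogue_sketch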
13,872
C
30.105381
103
0.649438